diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py
index 4d67ff6615227f0d9e7d82e2f69b39d72011a75c..c5ea4c54fef7b739a4ad33dd3759c3bdef124038 100644
--- a/src/common/DeviceTypes.py
+++ b/src/common/DeviceTypes.py
@@ -21,7 +21,7 @@ class DeviceTypeEnum(Enum):
     DATACENTER = 'datacenter'
     MICROVAWE_RADIO_SYSTEM = 'microwave-radio-system'
     OPTICAL_ROADM = 'optical-roadm'
-    OPTICAL_TRANDPONDER = 'optical-trandponder'
+    OPTICAL_TRANSPONDER = 'optical-transponder'
     OPEN_LINE_SYSTEM = 'open-line-system'
     PACKET_ROUTER = 'packet-router'
     PACKET_SWITCH = 'packet-switch'
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py
index 41d58caa4dfcb9172f72ad3e3e8dbde5426b68b6..daa9f4fe3a170874fcbb58d875626e3009a95d3c 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py
@@ -17,6 +17,8 @@ DEFAULT_ADDRESS_FAMILIES = ['IPV4']
 DEFAULT_BGP_AS = 65000
 DEFAULT_BGP_ROUTE_TARGET = '{:d}:{:d}'.format(DEFAULT_BGP_AS, 333)
 
+# TODO: improve definition of bearer mappings
+
 # Bearer mappings:
 # device_uuid:endpoint_uuid => (
 #   device_uuid, endpoint_uuid, router_id, route_distinguisher, sub_if_index, address_ip, address_prefix)
@@ -56,8 +58,9 @@ BEARER_MAPPINGS = {
     #'R4@D2:3/3': ('R4@D2', '3/3', '10.0.2.4', '65002:104', 100, '2.4.3.3', 24),
 
     # ECOC'22
-    'DC1-GW:CS1-GW1': ('CS1-GW1', '10/1', '10.0.1.101', '65000:101', 300, None, None),
-    'DC1-GW:CS1-GW2': ('CS1-GW2', '10/1', '10.0.2.101', '65000:102', 300, None, None),
-    'DC2-GW:CS2-GW1': ('CS2-GW1', '10/1', '10.0.1.102', '65000:103', 300, None, None),
-    'DC2-GW:CS2-GW2': ('CS2-GW2', '10/1', '10.0.2.102', '65000:104', 300, None, None),
+    # bearer_ref => device_uuid, endpoint_uuid, sub_if_index, router_id, remote_router, circuit_id
+    'DC1-GW:CS1-GW1': ('CS1-GW1', '10/1', 0, '5.5.1.1', '5.5.2.1', 111),
+    'DC1-GW:CS1-GW2': ('CS1-GW2', '10/1', 0, '5.5.1.2', '5.5.2.2', 222),
+    'DC2-GW:CS2-GW1': ('CS2-GW1', '10/1', 0, '5.5.2.1', '5.5.1.1', 111),
+    'DC2-GW:CS2-GW2': ('CS2-GW2', '10/1', 0, '5.5.2.2', '5.5.1.2', 222),
 }
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
index 9e4527f80baec59806601c47089ea31f49a9b1db..224ebf094243fccf56367c14a15831edcc975f07 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
@@ -91,7 +91,10 @@ class L2VPN_Service(Resource):
                 response.status_code = HTTP_NOCONTENT
                 return response
 
-            raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
+            LOGGER.warning('VPN({:s}) not found in database. Nothing done.'.format(str(vpn_id)))
+            response = jsonify({})
+            response.status_code = HTTP_NOCONTENT
+            return response
         except Exception as e: # pylint: disable=broad-except
             LOGGER.exception('Something went wrong Deleting VPN({:s})'.format(str(vpn_id)))
             response = jsonify({'error': str(e)})
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
index 7b959b2895d0f0acd27058fcb5e9a571cf6553d2..50b1c2abbb2b49ed9b8cb84a3a0933df55d3bd8f 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
@@ -44,7 +44,11 @@ class L2VPN_Services(Resource):
         vpn_services : List[Dict] = request_data['ietf-l2vpn-svc:vpn-service']
         for vpn_service in vpn_services:
             try:
-                vpn_service_type = vpn_service['vpn-svc-type']
+                # For now, assume requests from OSM always need transport slices
+                # TODO: think how to differentiate
+                #vpn_service_type = vpn_service['vpn-svc-type']
+                vpn_service_type = 'vpls'
+
                 if vpn_service_type == 'vpws':
                     # pylint: disable=no-member
                     service_request = Service()
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index 2e0900534d3cfa29eb6404489ff4d1c98a3e5d64..0dea176972ab06156dbcee875e7b857a0b4f8c95 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -65,7 +65,9 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s
     if mapping is None:
         msg = 'Specified Bearer({:s}) is not configured.'
         raise Exception(msg.format(str(bearer_reference)))
-    device_uuid,endpoint_uuid,router_id,route_distinguisher,sub_if_index,address_ip,address_prefix = mapping
+    #device_uuid,endpoint_uuid,router_id,route_dist,sub_if_index,address_ip,address_prefix = mapping
+    route_dist, address_ip, address_prefix = None, None, None
+    device_uuid, endpoint_uuid, sub_if_index, router_id, remote_router, circuit_id = mapping
 
     target : Union[Service, Slice, None] = None
     if target is None: target = get_slice (context_client, vpn_id)
@@ -88,20 +90,21 @@
     service_settings_key = '/settings'
     update_config_rule_custom(config_rules, service_settings_key, {
         'mtu' : (DEFAULT_MTU, True),
-        'address_families': (DEFAULT_ADDRESS_FAMILIES, True),
-        'bgp_as' : (DEFAULT_BGP_AS, True),
-        'bgp_route_target': (DEFAULT_BGP_ROUTE_TARGET, True),
+        #'address_families': (DEFAULT_ADDRESS_FAMILIES, True),
+        #'bgp_as' : (DEFAULT_BGP_AS, True),
+        #'bgp_route_target': (DEFAULT_BGP_ROUTE_TARGET, True),
     })
 
     endpoint_settings_key = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid)
-    field_updates = {
-        'router_id' : (router_id, True),
-        'route_distinguisher': (route_distinguisher, True),
-        'sub_interface_index': (sub_if_index, True),
-        'vlan_id' : (cvlan_id, True),
-    }
-    if address_ip is not None: field_updates['address_ip' ] = (address_ip, True)
-    if address_prefix is not None: field_updates['address_prefix' ] = (address_prefix, True)
+    field_updates = {}
+    if router_id is not None: field_updates['router_id' ] = (router_id, True)
+    if route_dist is not None: field_updates['route_distinguisher'] = (route_dist, True)
+    if sub_if_index is not None: field_updates['sub_interface_index'] = (sub_if_index, True)
+    if cvlan_id is not None: field_updates['vlan_id' ] = (cvlan_id, True)
+    if address_ip is not None: field_updates['address_ip' ] = (address_ip, True)
+    if address_prefix is not None: field_updates['address_prefix' ] = (address_prefix, True)
+    if remote_router is not None: field_updates['remote_router' ] = (remote_router, True)
+    if circuit_id is not None: field_updates['circuit_id' ] = (circuit_id, True)
     update_config_rule_custom(config_rules, endpoint_settings_key, field_updates)
 
     field_updates = {}
diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py
index 6188a385da391af74272eee3be1e467c15f97702..75e315b37a41edacf6f200575eae5dc44c51d642 100644
--- a/src/device/service/drivers/__init__.py
+++ b/src/device/service/drivers/__init__.py
@@ -31,6 +31,7 @@ DRIVERS = [
             FilterFieldEnum.DEVICE_TYPE: [
                 DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM,
                 DeviceTypeEnum.EMULATED_PACKET_ROUTER,
+                DeviceTypeEnum.PACKET_ROUTER, # temporary for ECOC'22
             ],
             FilterFieldEnum.DRIVER : [
                 ORM_DeviceDriverEnum.UNDEFINED,
@@ -39,13 +40,13 @@ DRIVERS = [
             ],
         }
     ]),
-    (OpenConfigDriver, [
-        {
-            # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver
-            FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER,
-            FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.OPENCONFIG,
-        }
-    ]),
+    #(OpenConfigDriver, [ # temporary for ECOC'22
+    #    {
+    #        # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver
+    #        FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER,
+    #        FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.OPENCONFIG,
+    #    }
+    #]),
     (TransportApiDriver, [
         {
             # Real OLS, specifying TAPI Driver => use TransportApiDriver
@@ -53,17 +54,17 @@
             FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.TRANSPORT_API,
         }
     ]),
-    (P4Driver, [
-        {
-            # Real P4 Switch, specifying P4 Driver => use P4Driver
-            FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH,
-            FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.P4,
-        }
-    ]),
-    (IETFApiDriver, [
-        {
-            FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROVAWE_RADIO_SYSTEM,
-            FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY,
-        }
-    ]),
+    #(P4Driver, [ # temporary for ECOC'22
+    #    {
+    #        # Real P4 Switch, specifying P4 Driver => use P4Driver
+    #        FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH,
+    #        FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.P4,
+    #    }
+    #]),
+    #(IETFApiDriver, [ # temporary for ECOC'22
+    #    {
+    #        FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROVAWE_RADIO_SYSTEM,
+    #        FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY,
+    #    }
+    #]),
 ]
diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py
index dd41096ec25fb74f1b1b855c98f90e09fee33194..c35ae9b9dc610572f77f8e139c5c6f0c76f48e77 100644
--- a/src/device/service/drivers/openconfig/OpenConfigDriver.py
+++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py
@@ -66,6 +66,7 @@ class NetconfSessionHandler:
         self.__look_for_keys = settings.get('look_for_keys', True)
         self.__allow_agent = settings.get('allow_agent', True)
         self.__force_running = settings.get('force_running', False)
+        self.__commit_per_delete = settings.get('delete_rule', False)
         self.__device_params = settings.get('device_params', {})
         self.__manager_params = settings.get('manager_params', {})
         self.__nc_params = settings.get('nc_params', {})
@@ -90,6 +91,9 @@ class NetconfSessionHandler:
     @property
     def use_candidate(self): return self.__candidate_supported and not self.__force_running
 
+    @property
+    def commit_per_rule(self): return self.__commit_per_delete
+
     @RETRY_DECORATOR
     def get(self, filter=None, with_defaults=None): # pylint: disable=redefined-builtin
         with self.__lock:
@@ -181,8 +185,8 @@ def do_sampling(samples_cache : SamplesCache, resource_key : str, out_samples :
         LOGGER.exception('Error retrieving samples')
 
 def edit_config(
-    netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, target='running',
-    default_operation='merge', test_option=None, error_option=None, format='xml' # pylint: disable=redefined-builtin
+    netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, commit_per_rule= False,
+    target='running', default_operation='merge', test_option=None, error_option=None, format='xml' # pylint: disable=redefined-builtin
 ):
     str_method = 'DeleteConfig' if delete else 'SetConfig'
     LOGGER.info('[{:s}] resources = {:s}'.format(str_method, str(resources)))
@@ -202,6 +206,8 @@ def edit_config(
             netconf_handler.edit_config(
                 config=str_config_message, target=target, default_operation=default_operation,
                 test_option=test_option, error_option=error_option, format=format)
+            if commit_per_rule:
+                netconf_handler.commit()
             results[i] = True
         except Exception as e: # pylint: disable=broad-except
             str_operation = 'preparing' if target == 'candidate' else ('deleting' if delete else 'setting')
@@ -278,12 +284,15 @@ class OpenConfigDriver(_Driver):
         with self.__lock:
             if self.__netconf_handler.use_candidate:
                 with self.__netconf_handler.locked(target='candidate'):
-                    results = edit_config(self.__netconf_handler, resources, target='candidate')
-                    try:
-                        self.__netconf_handler.commit()
-                    except Exception as e: # pylint: disable=broad-except
-                        LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources)))
-                        results = [e for _ in resources] # if commit fails, set exception in each resource
+                    if self.__netconf_handler.commit_per_rule:
+                        results = edit_config(self.__netconf_handler, resources, target='candidate', commit_per_rule= True)
+                    else:
+                        results = edit_config(self.__netconf_handler, resources, target='candidate')
+                        try:
+                            self.__netconf_handler.commit()
+                        except Exception as e: # pylint: disable=broad-except
+                            LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources)))
+                            results = [e for _ in resources] # if commit fails, set exception in each resource
             else:
                 results = edit_config(self.__netconf_handler, resources)
         return results
@@ -294,12 +303,15 @@ class OpenConfigDriver(_Driver):
         with self.__lock:
             if self.__netconf_handler.use_candidate:
                 with self.__netconf_handler.locked(target='candidate'):
-                    results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True)
-                    try:
-                        self.__netconf_handler.commit()
-                    except Exception as e: # pylint: disable=broad-except
-                        LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources)))
-                        results = [e for _ in resources] # if commit fails, set exception in each resource
+                    if self.__netconf_handler.commit_per_rule:
+                        results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True, commit_per_rule= True)
+                    else:
+                        results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True)
+                        try:
+                            self.__netconf_handler.commit()
+                        except Exception as e: # pylint: disable=broad-except
+                            LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources)))
+                            results = [e for _ in resources] # if commit fails, set exception in each resource
             else:
                 results = edit_config(self.__netconf_handler, resources, delete=True)
         return results
diff --git a/src/device/service/drivers/openconfig/templates/EndPoints.py b/src/device/service/drivers/openconfig/templates/EndPoints.py
index c11b1669d5b4cf3ca47986817ded28f75ae8358f..718a02d193531924bef863f5ccd2cbb999388dbd 100644
--- a/src/device/service/drivers/openconfig/templates/EndPoints.py
+++ b/src/device/service/drivers/openconfig/templates/EndPoints.py
@@ -20,7 +20,7 @@ from .Tools import add_value_from_collection, add_value_from_tag
 
 LOGGER = logging.getLogger(__name__)
 
-XPATH_PORTS = "//ocp:components/ocp:component/ocp:state[ocp:type='PORT']/.."
+XPATH_PORTS = "//ocp:components/ocp:component"
 XPATH_IFACE_COUNTER = "//oci:interfaces/oci:interface[oci:name='{:s}']/state/counters/{:s}"
 
 def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
@@ -28,6 +28,13 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
     for xml_component in xml_data.xpath(XPATH_PORTS, namespaces=NAMESPACES):
         #LOGGER.info('xml_component = {:s}'.format(str(ET.tostring(xml_component))))
 
+        component_type = xml_component.find('ocp:state/ocp:type', namespaces=NAMESPACES)
+        if component_type is None or component_type.text is None: continue
+        component_type = component_type.text
+        if component_type not in {'PORT', 'oc-platform-types:PORT'}: continue
+
+        LOGGER.info('PORT xml_component = {:s}'.format(str(ET.tostring(xml_component))))
+
         endpoint = {}
 
         component_name = xml_component.find('ocp:name', namespaces=NAMESPACES)
diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
index 70b50dae53ba22eb6c8df018fb5663cce0bc125e..76b49bc8bd4a5ded840ccad13f0941d05070d344 100644
--- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
@@ -194,9 +194,13 @@ class KDisjointPathAlgorithm(_Algorithm):
             grpc_services[service_key] = self.add_service_to_reply(reply, context_uuid, service_uuid)
 
             for num_path,service_path_ero in enumerate(paths):
+                self.logger.warning('num_path={:d}'.format(num_path))
+                self.logger.warning('service_path_ero={:s}'.format(str(service_path_ero)))
                 if service_path_ero is None: continue
                 path_hops = eropath_to_hops(service_path_ero, self.endpoint_to_link_dict)
+                self.logger.warning('path_hops={:s}'.format(str(path_hops)))
                 connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid)
+                self.logger.warning('connections={:s}'.format(str(connections)))
                 for connection in connections:
                     connection_uuid,device_layer,path_hops,_ = connection
 
@@ -221,8 +225,8 @@ class KDisjointPathAlgorithm(_Algorithm):
                     grpc_connection = self.add_connection_to_reply(reply, connection_uuid, grpc_service, path_hops)
                     grpc_connections[connection_uuid] = grpc_connection
 
-                    for service_uuid in dependencies:
-                        sub_service_key = (context_uuid, service_uuid)
+                    for sub_service_uuid in dependencies:
+                        sub_service_key = (context_uuid, sub_service_uuid)
                         grpc_sub_service = grpc_services.get(sub_service_key)
                         if grpc_sub_service is None:
                             raise Exception('Service({:s}) not found'.format(str(sub_service_key)))
diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
index bb96ff354ef32cb0a269d2b678fdb9552d86939d..b798813a83d984d6d1d75450529e9c826e220624 100644
--- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
@@ -12,11 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging, requests, uuid
+import json, logging, requests
 from typing import Dict, List, Optional, Tuple
-from common.proto.context_pb2 import Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum
+from common.proto.context_pb2 import (
+    ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum,
+    ServiceTypeEnum)
 from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
-from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.ConfigRule import json_config_rule_set
 from pathcomp.frontend.Config import BACKEND_URL
 from pathcomp.frontend.service.algorithms.tools.ConstantsMappings import DEVICE_LAYER_TO_SERVICE_TYPE, DeviceLayerEnum
 from .tools.EroPathToHops import eropath_to_hops
@@ -156,6 +158,17 @@
             raise Exception(MSG.format(str(device_layer)))
         service.service_type = service_type
 
+        if service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
+            json_tapi_settings = {
+                'capacity_value' : 50.0,
+                'capacity_unit' : 'GHz',
+                'layer_proto_name': 'PHOTONIC_MEDIA',
+                'layer_proto_qual': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC',
+                'direction' : 'UNIDIRECTIONAL',
+            }
+            config_rule = ConfigRule(**json_config_rule_set('/settings', json_tapi_settings))
+            service.service_config.config_rules.append(config_rule)
+
         service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
 
         if path_hops is not None and len(path_hops) > 0:
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
index 5e4f5408398cca012dca52fb19bf11a2b84a5721..2ff97b96c2a33e77745239b5f944cecb19639b1d 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
@@ -92,7 +92,7 @@ DEVICE_TYPE_TO_LAYER = {
     DeviceTypeEnum.OPEN_LINE_SYSTEM.value : DeviceLayerEnum.OPTICAL_CONTROLLER,
 
     DeviceTypeEnum.OPTICAL_ROADM.value : DeviceLayerEnum.OPTICAL_DEVICE,
-    DeviceTypeEnum.OPTICAL_TRANDPONDER.value : DeviceLayerEnum.OPTICAL_DEVICE,
+    DeviceTypeEnum.OPTICAL_TRANSPONDER.value : DeviceLayerEnum.OPTICAL_DEVICE,
 }
 
 DEVICE_LAYER_TO_SERVICE_TYPE = {
diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
index be0f1fda56e0fe3248b625595dcf7be806c86c68..d173f3f270ebbd18a6440f55f7e82cf66720c03a 100644
--- a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
+++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 from typing import Dict, List
-from common.proto.context_pb2 import EndPointId, Service
 from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
 from service.service.service_handler_api.AnyTreeTools import TreeNode
 
@@ -22,136 +21,57 @@ def setup_config_rules(
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
-    connection_short_uuid = connection_uuid.split('-')[-1]
-    network_instance_name = '{:s}-NetInst'.format(connection_short_uuid)
-    network_interface_desc = '{:s}-NetIf'.format(connection_uuid)
-    network_subinterface_desc = '{:s}-NetSubIf'.format(connection_uuid)
+    json_settings : Dict = {} if service_settings is None else service_settings.value
+    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
 
-    if service_settings is None:
-        # MSG = 'Unable to retrieve settings for Service({:s})'
-        #raise Exception(MSG.format(connection_uuid))
-        mtu = 1450
-        bgp_as = 0
-        bgp_route_target = '0:0'
-    else:
-        json_settings : Dict = service_settings.value
-        mtu = json_settings.get('mtu', 1450 ) # 1512
-        #address_families = json_settings.get('address_families', [] ) # ['IPV4']
-        bgp_as = json_settings.get('bgp_as', 0 ) # 65000
-        bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333
+    mtu = json_settings.get('mtu', 1450 ) # 1512
+    #address_families = json_settings.get('address_families', [] ) # ['IPV4']
+    #bgp_as = json_settings.get('bgp_as', 0 ) # 65000
+    #bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333
 
-    if endpoint_settings is None:
-        #MSG = 'Unable to retrieve settings for device({:s}/endpoint({:s}) in service({:s})'
-        #raise Exception(MSG.format(device_uuid, endpoint_uuid, connection_uuid))
-        route_distinguisher = '0:0'
-        sub_interface_index = 0
-        vlan_id = 1
-        address_ip = '0.0.0.0'
-        address_prefix = 24
-        if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
-    else:
-        json_endpoint_settings : Dict = endpoint_settings.value
-        #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10'
-        route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801'
-        sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1
-        vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400
-        address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1'
-        address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30
-        if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
+    router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10'
+    #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801'
+    sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1
+    vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400
+    #address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1'
+    #address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30
+    remote_router = json_endpoint_settings.get('remote_router', '0.0.0.0') # '5.5.5.5'
+    circuit_id = json_endpoint_settings.get('circuit_id', '000' ) # '111'
+
+    if_cirid_name = '{:s}.{:s}'.format(endpoint_uuid, str(circuit_id))
+    network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
+    connection_point_id = 'VC-1'
 
     json_config_rules = [
         json_config_rule_set(
-            '/network_instance[{:s}]'.format(network_instance_name), {
-                'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF',
-                'route_distinguisher': route_distinguisher,
-                #'router_id': router_id, 'address_families': address_families,
-            }),
-        json_config_rule_set(
-            '/interface[{:s}]'.format(endpoint_uuid), {
-                'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu,
-            }),
-        json_config_rule_set(
-            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
-                'name': endpoint_uuid, 'index': sub_interface_index,
-                'description': network_subinterface_desc, 'vlan_id': vlan_id,
-                'address_ip': address_ip, 'address_prefix': address_prefix,
-            }),
-        json_config_rule_set(
-            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
-                'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid,
-                'subinterface': sub_interface_index,
-            }),
-        json_config_rule_set(
-            '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
-                'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as,
-            }),
-        json_config_rule_set(
-            '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
-                'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
-                'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
-            }),
-        json_config_rule_set(
-            '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
-                network_instance_name), {
-                'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
-                'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
-            }),
-        json_config_rule_set(
-            '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
-                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-            }),
-        json_config_rule_set(
-            '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
-                network_instance_name, bgp_route_target), {
-                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target),
-            }),
-        json_config_rule_set(
-            '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
-                'policy_name': '{:s}_import'.format(network_instance_name),
-            }),
-        json_config_rule_set(
-            '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
-                network_instance_name, '3'), {
-                'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
-                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
-            }),
+            '/network_instance[default]',
+            {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}),
+
         json_config_rule_set(
-            # pylint: disable=duplicate-string-formatting-argument
-            '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
-                network_instance_name, network_instance_name), {
-                'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name),
-            }),
+            '/network_instance[default]/protocols[OSPF]',
+            {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}),
+
         json_config_rule_set(
-            '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
-                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-            }),
+            '/network_instance[default]/protocols[STATIC]',
+            {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}),
+
         json_config_rule_set(
-            '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
-                network_instance_name, bgp_route_target), {
-                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target),
-            }),
+            '/network_instance[{:s}]'.format(network_instance_name),
+            {'name': network_instance_name, 'type': 'L2VSI'}),
+
         json_config_rule_set(
-            '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
-                'policy_name': '{:s}_export'.format(network_instance_name),
-            }),
+            '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index),
+            {'name': if_cirid_name, 'type': 'l2vlan', 'index': sub_interface_index, 'vlan_id': vlan_id}),
+
         json_config_rule_set(
-            '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
-                network_instance_name, '3'), {
-                'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
-                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
-            }),
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
+            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}),
+
         json_config_rule_set(
-            # pylint: disable=duplicate-string-formatting-argument
-            '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
-                network_instance_name, network_instance_name), {
-                'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name),
-            }),
+            '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
+            {'name': network_instance_name, 'connection_point': connection_point_id, 'VC_ID': circuit_id,
+             'remote_system': remote_router}),
     ]
-
     return json_config_rules
 
 def teardown_config_rules(
@@ -159,110 +79,54 @@
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
-    connection_short_uuid = connection_uuid.split('-')[-1]
-    network_instance_name = '{:s}-NetInst'.format(connection_short_uuid)
+    json_settings : Dict = {} if service_settings is None else service_settings.value
+    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
 
-    if service_settings is None:
-        # MSG = 'Unable to retrieve settings for Service({:s})'
-        #raise Exception(MSG.format(connection_uuid))
-        bgp_route_target = '0:0'
-    else:
-        json_settings : Dict = service_settings.value
-        bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333
+    mtu = json_settings.get('mtu', 1450 ) # 1512
+    #address_families = json_settings.get('address_families', [] ) # ['IPV4']
+    #bgp_as = json_settings.get('bgp_as', 0 ) # 65000
+    #bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333
 
-    if endpoint_settings is None:
-        #MSG = 'Unable to retrieve settings for device({:s}/endpoint({:s}) in service({:s})'
-        #raise Exception(MSG.format(device_uuid, endpoint_uuid, connection_uuid))
-        sub_interface_index = 0
-        vlan_id = 1
-        if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
-    else:
-        json_endpoint_settings : Dict = endpoint_settings.value
-        sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1
-        vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400
-        if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
+    router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10'
+    #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801'
+    sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1
+    vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400
+    #address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1'
+    #address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30
+    remote_router = json_endpoint_settings.get('remote_router', '0.0.0.0') # '5.5.5.5'
+    circuit_id = json_endpoint_settings.get('circuit_id', '000' ) # '111'
+
+    if_cirid_name = '{:s}.{:s}'.format(endpoint_uuid, str(circuit_id))
+    network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
+    connection_point_id = 'VC-1'
 
     json_config_rules = [
         json_config_rule_delete(
-            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
-                'name': network_instance_name, 'id': if_subif_name,
-            }),
-        json_config_rule_delete(
-            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
-                'name': endpoint_uuid, 'index': sub_interface_index,
-            }),
-        json_config_rule_delete(
-            '/interface[{:s}]'.format(endpoint_uuid), {
-                'name': endpoint_uuid,
-            }),
-        json_config_rule_delete(
-            '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
-                network_instance_name), {
-                'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
-                'address_family': 'IPV4',
-            }),
-        json_config_rule_delete(
-            '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
-                'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
-                'address_family': 'IPV4',
-            }),
-        json_config_rule_delete(
-            '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
-                'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP',
-            }),
-        json_config_rule_delete(
-            # pylint: disable=duplicate-string-formatting-argument
-            '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
-                network_instance_name, network_instance_name), {
-                'name': network_instance_name,
-            }),
-        json_config_rule_delete(
-            '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
-                network_instance_name, '3'), {
-                'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
-            }),
-        json_config_rule_delete(
-            '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
-                'policy_name': '{:s}_import'.format(network_instance_name),
-            }),
-        json_config_rule_delete(
-            '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
-                network_instance_name, bgp_route_target), {
-                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target),
-            }),
-        json_config_rule_delete(
-            '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
-                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-            }),
+            '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
+            {'name': network_instance_name, 'connection_point': connection_point_id}),
+
         json_config_rule_delete(
-            # pylint: disable=duplicate-string-formatting-argument
-            '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
-                network_instance_name, network_instance_name), {
-                'name': network_instance_name,
-            }),
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
+            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}),
+
         json_config_rule_delete(
-            '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
-                network_instance_name, '3'), {
-                'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
-            }),
+            '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index),
+            {'name': if_cirid_name, 'index': sub_interface_index}),
+
         json_config_rule_delete(
-            '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
-                'policy_name': '{:s}_export'.format(network_instance_name),
-            }),
+            '/network_instance[{:s}]'.format(network_instance_name),
+            {'name': network_instance_name}),
+
         json_config_rule_delete(
-            '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
-                network_instance_name, bgp_route_target), {
-                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target),
-            }),
+            '/network_instance[default]/protocols[STATIC]',
+            {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}),
+
         json_config_rule_delete(
-            '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
-                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-            }),
+            '/network_instance[default]/protocols[OSPF]',
+            {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}),
+
         json_config_rule_delete(
-            '/network_instance[{:s}]'.format(network_instance_name), {
-                'name': network_instance_name
-            }),
+            '/network_instance[default]',
+            {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}),
     ]
     return json_config_rules
diff --git a/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py b/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py
index 1249af0ae7944f09bd12f2fab4e6e78523320c06..aeba6a26ab5d3fdc42925bcd9bda0a3c5790ece4 100644
--- a/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py
+++ b/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py
@@ -14,58 +14,54 @@
 
 import anytree, json, logging
 from typing import Any, Dict, List, Optional, Tuple, Union
-from common.orm.Database import Database
 from common.orm.HighLevel import get_object
-from common.orm.backend.Tools import key_to_str
-from common.proto.context_pb2 import Device
+from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, Device, DeviceId, Service
 from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-from service.service.database.ConfigModel import ORM_ConfigActionEnum, get_config_rules
-from service.service.database.ContextModel import ContextModel
 from service.service.database.DeviceModel import DeviceModel
-from service.service.database.ServiceModel import ServiceModel
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
 
 LOGGER = logging.getLogger(__name__)
 
 class TapiServiceHandler(_ServiceHandler):
     def __init__(   # pylint: disable=super-init-not-called
-        self, db_service : ServiceModel, database : Database, context_client : ContextClient,
-        device_client : DeviceClient, **settings
+        self, service : Service, task_executor : TaskExecutor, **settings
     ) -> None:
-        self.__db_service = db_service
-        self.__database = database
-        self.__context_client = context_client # pylint: disable=unused-private-member
-        self.__device_client = device_client
-
-        self.__db_context : ContextModel = get_object(self.__database, ContextModel, self.__db_service.context_fk)
-        str_service_key = key_to_str([self.__db_context.context_uuid, self.__db_service.service_uuid])
-        db_config = get_config_rules(self.__database, str_service_key, 'running')
+        self.__service = service
+        self.__task_executor = task_executor # pylint: disable=unused-private-member
         self.__resolver = anytree.Resolver(pathattr='name')
         self.__config = TreeNode('.')
-        for action, resource_key, resource_value in db_config:
-            if action == ORM_ConfigActionEnum.SET:
+        for config_rule in service.service_config.config_rules:
+            action = config_rule.action
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            resource_key = config_rule.custom.resource_key
+            resource_value = config_rule.custom.resource_value
+            if action == ConfigActionEnum.CONFIGACTION_SET:
                 try:
                     resource_value = json.loads(resource_value)
                 except: # pylint: disable=bare-except
                     pass
                 set_subnode_value(self.__resolver, self.__config, resource_key, resource_value)
-            elif action == ORM_ConfigActionEnum.DELETE:
+            elif action == ConfigActionEnum.CONFIGACTION_DELETE:
                 delete_subnode(self.__resolver, self.__config, resource_key)
 
-    def SetEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]:
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        LOGGER.info('[SetEndpoint] endpoints={:s}'.format(str(endpoints)))
+        LOGGER.info('[SetEndpoint] connection_uuid={:s}'.format(str(connection_uuid)))
         chk_type('endpoints', endpoints, list)
         if len(endpoints) != 2: return []
 
-        service_uuid = self.__db_service.service_uuid
-        service_settings : TreeNode = get_subnode(self.__resolver, self.__config, 'settings', None)
-        if service_settings is None: raise Exception('Unable to settings for Service({:s})'.format(str(service_uuid)))
+        service_uuid = self.__service.service_id.service_uuid.uuid
+        settings : TreeNode = get_subnode(self.__resolver, self.__config, '/settings', None)
+        if settings is None: raise Exception('Unable to retrieve settings for Service({:s})'.format(str(service_uuid)))
 
-        json_settings : Dict = service_settings.value
-        capacity_value = json_settings.get('capacity_value', 1)
+        json_settings : Dict = settings.value
+        capacity_value = json_settings.get('capacity_value', 50.0)
         capacity_unit = json_settings.get('capacity_unit', 'GHz')
         layer_proto_name = json_settings.get('layer_proto_name', 'PHOTONIC_MEDIA')
         layer_proto_qual = json_settings.get('layer_proto_qual', 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC')
@@ -74,46 +70,44 @@ class TapiServiceHandler(_ServiceHandler):
         results = []
         try:
             device_uuid = endpoints[0][0]
-            db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True)
-            json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True)
-            json_device_config : Dict = json_device.setdefault('device_config', {})
-            json_device_config_rules : List = json_device_config.setdefault('config_rules', [])
-            json_device_config_rules.extend([
-                json_config_rule_set('/service[{:s}]'.format(service_uuid), {
-                    'uuid' : service_uuid,
-                    'input_sip' : endpoints[0][1],
-                    'output_sip' : endpoints[1][1],
-                    'capacity_unit' : capacity_unit,
-                    'capacity_value' : capacity_value,
-                    'layer_protocol_name' : layer_proto_name,
-                    'layer_protocol_qualifier': layer_proto_qual,
-                    'direction' : direction,
-                }),
-            ])
-            self.__device_client.ConfigureDevice(Device(**json_device))
+            device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+            json_config_rule = json_config_rule_set('/service[{:s}]'.format(service_uuid), {
+                'uuid' : service_uuid,
+                'input_sip' : endpoints[0][1],
+                'output_sip' : endpoints[1][1],
+                'capacity_unit' : capacity_unit,
+                'capacity_value' : capacity_value,
+                'layer_protocol_name' : layer_proto_name,
+                'layer_protocol_qualifier': layer_proto_qual,
+                'direction' : direction,
+            })
+            del device.device_config.config_rules[:]
+            device.device_config.config_rules.append(ConfigRule(**json_config_rule))
+            self.__task_executor.configure_device(device)
             results.append(True)
         except Exception as e: # pylint: disable=broad-except
-            LOGGER.exception('Unable to SetEndpoint for Service({:s})'.format(str(service_uuid)))
+            LOGGER.exception('Unable to configure Service({:s})'.format(str(service_uuid)))
             results.append(e)
 
         return results
 
-    def DeleteEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]:
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        LOGGER.info('[DeleteEndpoint] endpoints={:s}'.format(str(endpoints)))
+        LOGGER.info('[DeleteEndpoint] connection_uuid={:s}'.format(str(connection_uuid)))
+
         chk_type('endpoints', endpoints, list)
         if len(endpoints) != 2: return []
 
-        service_uuid = self.__db_service.service_uuid
+        service_uuid = self.__service.service_id.service_uuid.uuid
 
         results = []
         try:
             device_uuid = endpoints[0][0]
-            db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True)
-            json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True)
-            json_device_config : Dict = json_device.setdefault('device_config', {})
-            json_device_config_rules : List = json_device_config.setdefault('config_rules', [])
-            json_device_config_rules.extend([
-                json_config_rule_delete('/service[{:s}]'.format(service_uuid), {'uuid': service_uuid})
-            ])
-            self.__device_client.ConfigureDevice(Device(**json_device))
+            device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+            json_config_rule = json_config_rule_delete('/service[{:s}]'.format(service_uuid), {'uuid': service_uuid})
+            device.device_config.config_rules.append(ConfigRule(**json_config_rule))
+            self.__task_executor.configure_device(device)
             results.append(True)
         except Exception as e: # pylint: disable=broad-except
             LOGGER.exception('Unable to DeleteEndpoint for Service({:s})'.format(str(service_uuid)))
diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py
index 9f102f558121e4eb8f7fa9c89dd1b637c92ac8a4..1df13a0f469cd577f1a28ff606aaf7af76a7e5cb 100644
--- a/src/service/service/task_scheduler/TaskScheduler.py
+++ b/src/service/service/task_scheduler/TaskScheduler.py
@@ -147,6 +147,7 @@ class TasksScheduler:
 
             if isinstance(item, Service):
                 str_item_key = grpc_message_to_json_string(item.service_id)
+                LOGGER.info('Exploring Service: {:s}'.format(str_item_key))
                 if str_item_key in explored_items: continue
 
                 include_service(item.service_id)
@@ -160,6 +161,7 @@ class TasksScheduler:
 
             elif isinstance(item, ServiceId):
                 str_item_key = grpc_message_to_json_string(item)
+                LOGGER.info('Exploring ServiceId: {:s}'.format(str_item_key))
                 if str_item_key in explored_items: continue
 
                 include_service(item)
@@ -173,16 +175,22 @@ class TasksScheduler:
 
             elif isinstance(item, Connection):
                 str_item_key = grpc_message_to_json_string(item.connection_id)
+                LOGGER.info('Exploring Connection: {:s}'.format(str_item_key))
                 if str_item_key in explored_items: continue
 
                 connection_key = include_connection(item.connection_id, item.service_id)
                 self._add_connection_to_executor_cache(connection)
+
+                #_,service_key_done = include_service(item.service_id)
                 self._executor.get_service(item.service_id)
+                #self._dag.add(service_key_done, connection_key)
                 pending_items_to_explore.put(item.service_id)
+
 
                 for sub_service_id in connection.sub_service_ids:
                     _,service_key_done = include_service(sub_service_id)
                     self._executor.get_service(sub_service_id)
-                    self._dag.add(connection_key, service_key_done)
+                    self._dag.add(service_key_done, connection_key)
+                    pending_items_to_explore.put(sub_service_id)
 
                 explored_items.add(str_item_key)
@@ -200,8 +208,10 @@ class TasksScheduler:
 
         results = []
         for task_key in ordered_task_keys:
+            LOGGER.info('Task {:s} - begin'.format(str(task_key)))
             task = self._tasks.get(task_key)
             succeeded = True if dry_run else task.execute()
+            LOGGER.info('Task {:s} - succeeded={:s}'.format(str(task_key), str(succeeded)))
             results.append(succeeded)
 
         LOGGER.info('execute_all results={:s}'.format(str(results)))
diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
index ea969214228f2606fdda58d9ba702053ad230bf0..7a99ccbde32e6fad2a126649313ece81d8299537 100644
--- a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
+++ b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
@@ -47,9 +47,10 @@ class Task_ConnectionConfigure(_Task):
         service_handler_settings = {}
         service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings)
 
-        connection_expander = ConnectionExpander()
-        traversed_endpoint_ids = connection_expander.get_endpoints_traversed(connection)
-        endpointids_to_set = endpointids_to_raw(traversed_endpoint_ids)
+        #connection_expander = ConnectionExpander()
+        #traversed_endpoint_ids = connection_expander.get_endpoints_traversed(connection)
+        #endpointids_to_set = endpointids_to_raw(traversed_endpoint_ids)
+        endpointids_to_set = endpointids_to_raw(connection.path_hops_endpoint_ids)
 
         connection_uuid = connection.connection_id.connection_uuid.uuid
         results_setendpoint = service_handler.SetEndpoint(endpointids_to_set, connection_uuid=connection_uuid)
diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
index fc849560ecbe19141efd9ad2e6ca43601cbf5fc0..bef5f85ddf68b036274f294da66ee48222512623 100644
--- a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
+++ b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
@@ -47,9 +47,10 @@ class Task_ConnectionDeconfigure(_Task):
         service_handler_settings = {}
         service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings)
 
-        connection_expander = ConnectionExpander()
-        traversed_endpoint_ids = connection_expander.get_endpoints_traversed(connection)
-        endpointids_to_delete = endpointids_to_raw(traversed_endpoint_ids)
+        #connection_expander = ConnectionExpander()
+        #traversed_endpoint_ids = connection_expander.get_endpoints_traversed(connection)
+        #endpointids_to_delete = endpointids_to_raw(traversed_endpoint_ids)
+        endpointids_to_delete = endpointids_to_raw(connection.path_hops_endpoint_ids)
 
         connection_uuid = connection.connection_id.connection_uuid.uuid
         results_deleteendpoint = service_handler.DeleteEndpoint(endpointids_to_delete, connection_uuid=connection_uuid)
diff --git a/src/tests/ecoc22/deploy_specs.sh b/src/tests/ecoc22/deploy_specs.sh
index 03d17f4282cc9459e57e7a36990df124107f47f8..cdcee08bb035346e8bbf232a637a5e2cba73fcd5 100644
--- a/src/tests/ecoc22/deploy_specs.sh
+++ b/src/tests/ecoc22/deploy_specs.sh
@@ -2,7 +2,7 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device service pathcomp slice compute webui" # automation
+export TFS_COMPONENTS="context device service automation pathcomp slice compute webui"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
@@ -11,7 +11,7 @@ export TFS_IMAGE_TAG="dev"
 export TFS_K8S_NAMESPACE="tfs"
 
 # Set additional manifest files to be applied after the deployment
-export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+export TFS_EXTRA_MANIFESTS="manifests/cttc-ols/cttc-ols.yaml manifests/nginx_ingress_http.yaml"
 
 # Set the neew Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index 5d7801d11880e89869120985307c6b43416f5a05..9804e4afd1c1b7c889c2f3e0d627471ee13b5c68 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -83,9 +83,9 @@
               <a class="nav-link" href="{{ url_for('slice.home') }}">Slice</a>
               {% endif %}
             </li>
-            <li class="nav-item">
+            <!--<li class="nav-item">
               <a class="nav-link" href="/grafana" id="grafana_link" target="grafana">Grafana</a>
-            </li>
+            </li>-->
 
             <li class="nav-item">
               <a class="nav-link" href="{{ url_for('main.debug') }}">Debug</a>
diff --git a/src/webui/service/templates/main/home.html b/src/webui/service/templates/main/home.html
index 3cc9fbcffce6cfbb6ebb40dec9d3359f59df5a15..db390939ff926b5bbfbfc6507b0f4e79695f3693 100644
--- a/src/webui/service/templates/main/home.html
+++ b/src/webui/service/templates/main/home.html
@@ -17,7 +17,7 @@
 {% extends 'base.html' %}
 
 {% block content %}
-    <h1>TeraFlow OS SDN Controller</h1>
+    <h2>ETSI TeraFlowSDN Controller</h2>
 
     {% for field, message in context_form.errors.items() %}
     <div class="alert alert-dismissible fade show" role="alert">