diff --git a/.gitignore b/.gitignore index 1b9e692a37af46fdb04e318d79eb08ac8e6e6eb5..b3806caaa7c5ad2a32d8089a86948b84570a5bb8 100644 --- a/.gitignore +++ b/.gitignore @@ -146,6 +146,7 @@ venv.bak/ # VSCode project settings .vscode/ +.github/ # Visual Studio project settings /.vs diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml index 91fa65af3742c2a5c51ed8dc40f1241b1bd930cb..035f8eeaf144eae71f88fcd29503d47c3f87a64f 100644 --- a/manifests/simap_connectorservice.yaml +++ b/manifests/simap_connectorservice.yaml @@ -43,7 +43,7 @@ spec: # Assuming SIMAP Server is deployed in a local Docker container, as per: # - ./src/tests/tools/simap_datastore/build.sh # - ./src/tests/tools/simap_datastore/deploy.sh - value: "172.17.0.1" + value: "10.254.0.9" - name: SIMAP_DATASTORE_PORT # Assuming SIMAP Server is deployed in a local Docker container, as per: # - ./src/tests/tools/simap_datastore/build.sh diff --git a/src/common/tools/rest_conf/server/restconf_server/Callbacks.py b/src/common/tools/rest_conf/server/restconf_server/Callbacks.py index 04a8b8bd9cb4dca5908029918393abfa27780b38..bd66be2f7048be72347256c58a7cb33ec93444f8 100644 --- a/src/common/tools/rest_conf/server/restconf_server/Callbacks.py +++ b/src/common/tools/rest_conf/server/restconf_server/Callbacks.py @@ -49,7 +49,7 @@ class _Callback: @param old_data: Resource representation before retrieval, if applicable, otherwise `None` @returns boolean indicating whether additional callbacks should be executed, defaults to False ''' - MSG = 'match={:s}, path={:s}, old_data={:s}' + MSG = 'match={}, path={}, old_data={}' msg = MSG.format(match.groupdict(), path, old_data) raise NotImplementedError(msg) @@ -66,7 +66,7 @@ class _Callback: @param new_data: Resource representation after change, if applicable, otherwise `None` @returns boolean indicating whether additional callbacks should be executed, defaults to False ''' - MSG = 'match={:s}, path={:s}, old_data={:s}, new_data={:s}' + MSG 
= 'match={}, path={}, old_data={}, new_data={}' msg = MSG.format(match.groupdict(), path, old_data, new_data) raise NotImplementedError(msg) @@ -81,7 +81,7 @@ class _Callback: @param input_data: Input data, if applicable, otherwise `None` @returns Optional[Dict] containing output data, defaults to None ''' - MSG = 'match={:s}, path={:s}, input_data={:s}' + MSG = 'match={}, path={}, input_data={:s}' msg = MSG.format(match.groupdict(), path, input_data) raise NotImplementedError(msg) diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 08f34b8ad12ae34ee38d94bb18ad762e958c18cb..1fe3b36f0e67b6a450102b62398e1b9847d20880 100644 --- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -186,6 +186,7 @@ class IetfL3VpnDriver(_Driver): def SetConfig( self, resources : List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: + LOGGER.info('SetConfig called with resources: {:s}'.format(str(resources))) results = [] if len(resources) == 0: return results with self.__lock: diff --git a/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py b/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py index 3b537a4675db4d16c66756804eebe028a3f94791..68cbc448a63aadf013aecc00236fa92ec5dea2cd 100644 --- a/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py @@ -259,10 +259,10 @@ def setup_config_rules( "/service[{:s}]/IETFL3VPN".format(service_uuid), l3_vpn_data_model, ), - #json_config_rule_set( - # "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), - # {"type": operation_type}, - #), + json_config_rule_set( + "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), + {"type": operation_type}, + ), ] return json_config_rules @@ -274,10 +274,10 @@ def teardown_config_rules(service_uuid: str) -> List[Dict]: 
"/service[{:s}]/IETFL3VPN".format(service_uuid), {"id": service_uuid}, ), - #json_config_rule_delete( - # "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), - # {}, - #), + json_config_rule_delete( + "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), + {"type": "delete"}, + ), ] return json_config_rules diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py index 8aafffc1aa79090c2ad7bda0b50020c924cae7cb..3917772d165f6fb896408c4cfd474d7f7aab0b17 100644 --- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -22,7 +22,7 @@ from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from device.client.DeviceClient import DeviceClient from simap_connector.service.telemetry.worker.SynthesizerWorker import SynthesizerWorker -from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum +from simap_connector.service.telemetry.worker._Worker import _Worker, WorkerTypeEnum from .database.Subscription import subscription_get, subscription_set, subscription_delete from .database.SubSubscription import ( sub_subscription_list, sub_subscription_set, sub_subscription_delete @@ -165,13 +165,20 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): link_id = request.link_id bandwidth_factor = request.bandwidth_factor latency_factor = request.latency_factor + # connection_count = request.connection_count + # TODO: Remove bandwidth_factor and latency_factor from the request, as they are not used in the current implementation. + # Add connection_count to the request. 
+ + connection_count = 0 synthesizer_name = '{:s}:{:s}'.format(network_id, link_id) - synthesizer : Optional[SynthesizerWorker] = self._telemetry_pool.get_worker( - WorkerTypeEnum.SYNTHESIZER, synthesizer_name + synthesizer : Optional[_Worker] = self._telemetry_pool.get_worker( + WorkerTypeEnum.SYNTHESIZER, synthesizer_name ) if synthesizer is None: MSG = 'Synthesizer({:s}) not found' raise Exception(MSG.format(synthesizer_name)) - synthesizer.change_resources(bandwidth_factor, latency_factor) + assert isinstance(synthesizer, SynthesizerWorker), \ + 'Expected SynthesizerWorker, got {:s}'.format(type(synthesizer).__name__) + synthesizer.change_resources(connection_count) return Empty() diff --git a/src/simap_connector/service/Tools.py b/src/simap_connector/service/Tools.py index 024f8d70896d9555a0eb51f2730e6b208726ddb6..1e143700ea1f78c9babe886d80d082f2b7b90b0e 100644 --- a/src/simap_connector/service/Tools.py +++ b/src/simap_connector/service/Tools.py @@ -62,24 +62,34 @@ def discover_link_details(restconf_client : RestConfClient, xpath_filter : str) network_id, link_id = link_xpath_match.groups() link_details = LinkDetails(Link(network_id, link_id)) - xpath_filter = link_details.link.get_xpath_filter(add_simap_telemetry=False) - xpath_data = restconf_client.get(xpath_filter) + # Workaround: RESTCONF server doesn't support namespace-prefixed child element paths + # Query at network level and filter the link from response + network_xpath = '/ietf-network:networks/network={:s}'.format(network_id) + xpath_data = restconf_client.get(network_xpath) if not xpath_data: - raise Exception('Resource({:s}) not found in SIMAP Server'.format(str(xpath_filter))) - - links = xpath_data.get('ietf-network-topology:link', list()) - if len(links) == 0: - raise Exception('Link({:s}) not found'.format(str(xpath_filter))) - if len(links) > 1: - raise Exception('Multiple occurrences for Link({:s})'.format(str(xpath_filter))) - link = links[0] - if link['link-id'] != link_id: - MSG = 
'Retieved Link({:s}) does not match xpath_filter({:s})' - raise Exception(MSG.format(str(link), str(xpath_filter))) + raise Exception('Network({:s}) not found in SIMAP Server'.format(str(network_xpath))) + + # Extract network data from response + networks = xpath_data.get('ietf-network:network', []) + if len(networks) == 0: + raise Exception('Network({:s}) not found in response'.format(network_id)) + network_data = networks[0] + + # Find the target link + links = network_data.get('ietf-network-topology:link', list()) + link = None + for l in links: + if l['link-id'] == link_id: + link = l + break + + if link is None: + raise Exception('Link({:s}) not found in network({:s})'.format(link_id, network_id)) + supporting_links = link.get('supporting-link', list()) if len(supporting_links) == 0: - MSG = 'No supporting links found for Resource({:s}, {:s})' - raise Exception(MSG.format(str(xpath_filter), str(xpath_data))) + MSG = 'No supporting links found for Link({:s}) in Network({:s})' + raise Exception(MSG.format(str(link_id), str(network_id))) for sup_link in supporting_links: link_details.supporting_links.append(Link( diff --git a/src/simap_connector/service/__main__.py b/src/simap_connector/service/__main__.py index 2f5e1146ba00931dfabd0603c1abfa78c817520c..b8782ff9c24fece1bb1189f8b70c3f47d4ec8907 100644 --- a/src/simap_connector/service/__main__.py +++ b/src/simap_connector/service/__main__.py @@ -24,12 +24,12 @@ from simap_connector.Config import ( SIMAP_DATASTORE_SCHEME, SIMAP_DATASTORE_ADDRESS, SIMAP_DATASTORE_PORT, SIMAP_DATASTORE_USERNAME, SIMAP_DATASTORE_PASSWORD, ) -from .database.Engine import Engine -from .database.models._Base import rebuild_database -from .simap_updater.SimapClient import SimapClient +from .database.Engine import Engine +from .database.models._Base import rebuild_database +from .simap_updater.SimapClient import SimapClient from .simap_updater.SimapUpdater import SimapUpdater -from .telemetry.TelemetryPool import TelemetryPool -from 
.SimapConnectorService import SimapConnectorService +from .telemetry.TelemetryPool import TelemetryPool +from .SimapConnectorService import SimapConnectorService TERMINATE = threading.Event() diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index e01d78451897a14b9994ba5a748432d90f0b6b6b..9c34b87ae029a744c6f0f64679b5c409585d1307 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -13,8 +13,27 @@ # limitations under the License. ALLOWED_LINKS_PER_CONTROLLER = { - 'e2e' : { 'L1', 'L2', 'L3', 'L4' }, - 'agg' : { 'L7ab', 'L7ba', 'L8ab', 'L8ba', 'L11ab', - 'L11ba', 'L12ab', 'L12ba', 'L13', 'L14' }, - 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, + 'e2e' : { 'L1', 'L2' }, + 'agg' : { 'L14' }, + 'trans-pkt': { 'L3', 'L5', 'L6', 'L9', 'L10', 'L13' }, + # The remaining can not be monitored therefore they are not included in the allowed links for the controllers + # 'agg' : { 'L7ab', 'L7ba', 'L8ab', 'L8ba', 'L11ab', 'L11ba', 'L12ab', 'L12ba', }, +} +# NOTE: Ranges should be less than 100 because the schema does not allow +# bandwidth-utilization to exceed 100% +# As per schema below: (percentage of link capacity) +# /* --- Local typedefs --- */ + # typedef percent { + # type decimal64 { + # fraction-digits 2; + # range "0 .. 
100"; + # } + # units "percent"; + # description "0–100 percent value."; + # } +LINKS_CAPACITY = { + 'L1' : 100, 'L2' : 100, 'L3' : 100, 'L4' : 100, + 'L5' : 100, 'L6' : 100, 'L9' : 100, 'L10' : 100, + 'L7ab' : 100, 'L7ba' : 100, 'L8ab' : 100, 'L8ba' : 100, 'L11ab' : 100, + 'L11ba' : 100, 'L12ab': 100, 'L12ba': 100, 'L13' : 100, 'L14' : 100, } diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py index d8b04f8d4dd8b07d9bf4d6c3ef01e5190c350aaa..98dc9a92375189d5eff5de10f38790f35f025c84 100644 --- a/src/simap_connector/service/simap_updater/ObjectCache.py +++ b/src/simap_connector/service/simap_updater/ObjectCache.py @@ -14,12 +14,13 @@ import logging -from enum import Enum +from enum import Enum from typing import Any, Dict, List, Optional, Tuple -from common.tools.context_queries.Device import get_device, get_devices -from common.tools.context_queries.Link import get_link, get_links -from common.tools.context_queries.Topology import get_topology, get_topologies -from common.tools.context_queries.Service import get_service_by_uuid, get_services +from common.tools.context_queries.Device import get_device, get_devices +from common.tools.context_queries.Link import get_link, get_links +from common.tools.context_queries.Topology import get_topology, get_topologies +from common.tools.context_queries.Service import get_service_by_uuid, get_services +from common.tools.context_queries.Connection import get_connection_by_uuid from context.client.ContextClient import ContextClient @@ -41,7 +42,7 @@ KEY_LENGTHS = { CachedEntities.ENDPOINT : 2, CachedEntities.LINK : 1, CachedEntities.SERVICE : 1, - CachedEntities.CONNECTION : 3, + CachedEntities.CONNECTION : 1, } @@ -63,6 +64,7 @@ class ObjectCache: def __init__(self, context_client : ContextClient): self._context_client = context_client self._object_cache : Dict[Tuple[str, str], Any] = dict() + # self.populate_all_cache() # NOTE: Added for testing 
purposes; should be removed/commented. def get( self, entity : CachedEntities, *object_uuids : str, @@ -113,6 +115,10 @@ class ObjectCache: object_inst = get_service_by_uuid( self._context_client, object_uuids[0], rw_copy=False ) + elif entity == CachedEntities.CONNECTION: + object_inst = get_connection_by_uuid( + self._context_client, object_uuids[0], rw_copy=False + ) else: MSG = 'Not Supported ({:s}, {:s})' LOGGER.warning(MSG.format(str(entity.value).title(), str(object_uuids))) @@ -124,7 +130,9 @@ class ObjectCache: return None self.set(entity, object_inst, object_uuids[0]) - self.set(entity, object_inst, object_inst.name) + # Connections don't have a name field, so skip setting by name + if entity != CachedEntities.CONNECTION: + self.set(entity, object_inst, object_inst.name) if entity == CachedEntities.DEVICE: device_uuid = object_inst.device_id.device_uuid.uuid @@ -180,7 +188,9 @@ class ObjectCache: for (object_uuid, object_name), object_inst in objects.items(): self.set(entity, object_inst, object_uuid) - self.set(entity, object_inst, object_name) + # Connections don't have a name field (object_name is same as UUID), so skip redundant set + if entity != CachedEntities.CONNECTION: + self.set(entity, object_inst, object_name) if entity == CachedEntities.DEVICE: for endpoint in object_inst.device_endpoints: @@ -199,3 +209,19 @@ class ObjectCache: def delete(self, entity : CachedEntities, *object_uuids : str) -> None: object_key = compose_object_key(entity, *object_uuids) self._object_cache.pop(object_key, None) + + def populate_all_cache(self) -> None: + """Populate cache with all entities for testing purposes.""" + LOGGER.info('Populating cache with all entities for testing...') + for entity in CachedEntities: + if entity in (CachedEntities.ENDPOINT, CachedEntities.CONNECTION): + # Endpoints are populated when devices are updated + # Connections are service-scoped; cached on-demand during events + continue + try: + self._update_all(entity) + # 
LOGGER.info('Populated cache for entity: {:s}'.format(entity.value)) + except Exception as e: + LOGGER.warning('Failed to populate cache for entity {:s}: {:s}'.format( + entity.value, str(e))) + LOGGER.info('Cache population completed') diff --git a/src/simap_connector/service/simap_updater/RealSimaps.py b/src/simap_connector/service/simap_updater/RealSimaps.py new file mode 100644 index 0000000000000000000000000000000000000000..184df61e95fc608e36ab8ea28b0a1fcf260d8412 --- /dev/null +++ b/src/simap_connector/service/simap_updater/RealSimaps.py @@ -0,0 +1,283 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import logging +from typing import Dict, List, Tuple +from context.client.ContextClient import ContextClient +from common.tools.context_queries.Device import get_device +from .SimapClient import SimapClient + + +LOGGER = logging.getLogger(__name__) + +def extract_network_data(context_client: ContextClient, network_id: str, network_connection: dict) -> list[tuple[str, dict]]: + + try: + # Extract path_hops_endpoint_ids from network_connection dict + path_hops = network_connection.get('path_hops_endpoint_ids', []) + + if not path_hops: + LOGGER.warning(f"No path_hops_endpoint_ids found in network_connection for network {network_id}") + return [] + + if len(path_hops) < 2: + LOGGER.warning(f"Connection path too short (less than 2 hops) for network {network_id}") + return [] + + # Extract first and last hops (SDPs - Service Demarcation Points) + first_hop = path_hops[0] + last_hop = path_hops[-1] + + # Extract device and endpoint UUIDs for SDPs + first_device_uuid = first_hop.get('device_id', {}).get('device_uuid', {}).get('uuid', '') + first_endpoint_uuid = first_hop.get('endpoint_uuid', {}).get('uuid', '') + + last_device_uuid = last_hop.get('device_id', {}).get('device_uuid', {}).get('uuid', '') + last_endpoint_uuid = last_hop.get('endpoint_uuid', {}).get('uuid', '') + + if not all([first_device_uuid, first_endpoint_uuid, last_device_uuid, last_endpoint_uuid]): + LOGGER.warning(f"Invalid first or last hop in path_hops_endpoint_ids for network {network_id}") + return [] + + # Prepare results for exactly 2 SDPs + network_data: List[Tuple[str, Dict[str, List[str]]]] = [] + + # Process first device (sdp1) + try: + first_device = get_device( + context_client, first_device_uuid, rw_copy=False, + include_endpoints=True, include_config_rules=False, include_components=False + ) + if first_device is None: + LOGGER.warning(f"First device with UUID {first_device_uuid} not found in context") + return [] + + first_device_name = first_device.name + + # Find the service-facing 
endpoint name + first_endpoint_name = None + for endpoint in first_device.device_endpoints: + if endpoint.endpoint_id.endpoint_uuid.uuid == first_endpoint_uuid: + first_endpoint_name = endpoint.name + break + + if not first_endpoint_name: + LOGGER.warning(f"First endpoint {first_endpoint_uuid} not found in device {first_device_name}") + return [] + + network_data.append((first_device_name, {'termination_points': [first_endpoint_name]})) + + except Exception as e: + LOGGER.error(f"Error retrieving first device {first_device_uuid} from context: {e}") + return [] + + # Process last device (sdp2) + try: + last_device = get_device( + context_client, last_device_uuid, rw_copy=False, + include_endpoints=True, include_config_rules=False, include_components=False + ) + if last_device is None: + LOGGER.warning(f"Last device with UUID {last_device_uuid} not found in context") + return [] + + last_device_name = last_device.name + + # Find the service-facing endpoint name + last_endpoint_name = None + for endpoint in last_device.device_endpoints: + if endpoint.endpoint_id.endpoint_uuid.uuid == last_endpoint_uuid: + last_endpoint_name = endpoint.name + break + + if not last_endpoint_name: + LOGGER.warning(f"Last endpoint {last_endpoint_uuid} not found in device {last_device_name}") + return [] + + network_data.append((last_device_name, {'termination_points': [last_endpoint_name]})) + + except Exception as e: + LOGGER.error(f"Error retrieving last device {last_device_uuid} from context: {e}") + return [] + + LOGGER.info(f"Extracted network data for {network_id}: {network_data}") + return network_data + + except Exception as e: + LOGGER.error(f"Error extracting network data from connection for network {network_id}: {e}") + return [] + + +def set_simap_network(context_client: ContextClient, simap_client: SimapClient, network_id: str, network_connection: dict) -> None: + """ + Configure a SIMAP network with preset configurations. 
+ + Args: + context_client: ContextClient instance + simap_client: SimapClient instance + network_id: Network identifier ('e2e', 'agg', or 'trans-pkt') + network_connection: Dictionary representation of Connection protobuf with path_hops_endpoint_ids + """ + + LOGGER.info(f"Setting SIMAP network: {network_id} for connection with {len(network_connection.get('path_hops_endpoint_ids', []))} hops") + network_data : list[tuple[str, dict]] = extract_network_data(context_client, network_id, network_connection) + + if network_id == 'e2e': + try: + # E2E Network Configuration + simap = simap_client.network('e2e') + simap.update(supporting_network_ids=['admin', 'agg']) + + # Configure nodes + node_names = ['sdp1', 'sdp2'] + endpoints = [] + + for i, (admin_node_id, node_config) in enumerate(network_data): + node = simap.node(node_names[i]) + node.update(supporting_node_ids=[('admin', admin_node_id)]) + for tp in node_config['termination_points']: + node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) + endpoints.append(tp) + + if len(endpoints) != 2: + MSG = 'Invalid number of endpoints for E2E network configuration. Expected 2, got {:d}.' 
+ LOGGER.error(MSG.format(len(endpoints))) + return + + link = simap.link('E2E-L1') + link.update( + 'sdp1', endpoints[0], 'sdp2', endpoints[1], + supporting_link_ids=[ + ('admin', 'L1'), ('agg', 'AggNet-L1') + ] + ) + except (KeyError, IndexError, ValueError) as e: + LOGGER.error(f'Error configuring E2E network: {e}') + return + except Exception as e: + LOGGER.error(f'Unexpected error configuring E2E network: {e}') + return + + elif network_id == 'agg': + try: + # Aggregation Network Configuration + simap = simap_client.network('agg') + simap.update(supporting_network_ids=['admin', 'trans-pkt']) + + # Configure nodes + node_names = ['sdp1', 'sdp2'] + endpoints = [] + for i, (admin_node_id, node_config) in enumerate(network_data): + node = simap.node(node_names[i]) + node.update(supporting_node_ids=[('admin', admin_node_id)]) + for tp in node_config['termination_points']: + node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) + endpoints.append(tp) + if len(endpoints) != 2: + MSG = 'Invalid number of endpoints for Aggregation network configuration. Expected 2, got {:d}.' 
+ LOGGER.error(MSG.format(len(endpoints))) + return + + link = simap.link('AggNet-L1') + link.update( + 'sdp1', endpoints[0], 'sdp2', endpoints[1], + supporting_link_ids=[ + ('trans-pkt', 'Trans-L1'), ('admin', 'L13'), ('admin', 'L3') + ] + ) + except (KeyError, IndexError, ValueError) as e: + LOGGER.error(f'Error configuring Aggregation network: {e}') + return + except Exception as e: + LOGGER.error(f'Unexpected error configuring Aggregation network: {e}') + return + + elif network_id == 'trans-pkt': + try: + # Transport Packet Network Configuration + simap = simap_client.network('trans-pkt') + simap.update(supporting_network_ids=['admin']) + + # Configure nodes + node_names = ['site1', 'site2'] + endpoints = [] + for i, (admin_node_id, node_config) in enumerate(network_data): + node = simap.node(node_names[i]) + node.update(supporting_node_ids=[('admin', admin_node_id)]) + for tp in node_config['termination_points']: + node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) + endpoints.append(tp) + if len(endpoints) != 2: + MSG = 'Invalid number of endpoints for Transport Packet network configuration. Expected 2, got {:d}.' + LOGGER.error(MSG.format(len(endpoints))) + return + + link = simap.link('Trans-L1') + link.update( + 'site1', endpoints[0], 'site2', endpoints[1], + supporting_link_ids=[ + ('admin', 'L6'), ('admin', 'L10') + ] + ) + except (KeyError, IndexError, ValueError) as e: + LOGGER.error(f'Error configuring Transport Packet network: {e}') + return + except Exception as e: + LOGGER.error(f'Unexpected error configuring Transport Packet network: {e}') + return + + else: + MSG = 'Unsupported network_id({:s}) to set SIMAP' + LOGGER.warning(MSG.format(str(network_id))) + return + + LOGGER.info(f'Successfully configured SIMAP network: {network_id}') + + +def delete_simap_network(simap_client: SimapClient, network_id: str) -> None: + """ + Delete a SIMAP network configuration. 
+ + Args: + simap_client: SimapClient instance + network_id: Network identifier ('e2e', 'agg', or 'trans-pkt') + """ + if network_id == 'e2e': + simap = simap_client.network('e2e') + simap.update(supporting_network_ids=['admin', 'agg']) + + link = simap.link('E2E-L1') + link.delete() + + elif network_id == 'agg': + simap = simap_client.network('agg') + simap.update(supporting_network_ids=['admin', 'trans-pkt']) + + link = simap.link('AggNet-L1') + link.delete() + + elif network_id == 'trans-pkt': + simap = simap_client.network('trans-pkt') + simap.update(supporting_network_ids=['admin']) + + link = simap.link('Trans-L1') + link.delete() + + else: + MSG = 'Unsupported network_id({:s}) to delete SIMAP' + LOGGER.warning(MSG.format(str(network_id))) + return + + LOGGER.info(f'Successfully deleted SIMAP network: {network_id}') diff --git a/src/simap_connector/service/simap_updater/SimapClient.py b/src/simap_connector/service/simap_updater/SimapClient.py index 725b08bd47e0bd127cf0f7c4131cb744313b149d..8cdf4708ee401441f401a8d5f919f840e1c624c3 100644 --- a/src/simap_connector/service/simap_updater/SimapClient.py +++ b/src/simap_connector/service/simap_updater/SimapClient.py @@ -64,7 +64,7 @@ class TerminationPoint: class NodeTelemetry: - ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' + ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): self._restconf_client = restconf_client @@ -173,7 +173,7 @@ class Node: class LinkTelemetry: - ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' + ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): self._restconf_client = restconf_client @@ -197,8 +197,8 @@ class LinkTelemetry: def get(self) -> Dict: endpoint = 
LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) - telemetry : Dict = self._restconf_client.get(endpoint) - return telemetry + link : Dict = self._restconf_client.get(endpoint) + return link.get('ietf-network-topology:link', [{}])[0].get('simap-telemetry:simap-telemetry', {}) def update( self, bandwidth_utilization : float, latency : float, @@ -210,14 +210,17 @@ class LinkTelemetry: 'latency' : '{:.3f}'.format(latency), } if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids - link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} - network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} payload = {'ietf-network:networks': {'network': [network]}} self._restconf_client.patch(endpoint, payload) def delete(self) -> None: endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) - self._restconf_client.delete(endpoint) + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': {}} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) class Link: diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 573085ac9182fac7f3d77740f2876146f8d394de..4abe81a52197b4b45925f6efdf0c3f4a8e27c7df 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -14,28 +14,28 @@ import logging, queue, threading, uuid -from typing import Any, Optional, Set +from typing import Any, List, Optional, Set, Tuple from common.Constants import DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum from 
common.proto.context_pb2 import ( - ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, + ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, ServiceStatusEnum, SliceEvent, TopologyEvent, ConnectionEvent ) -from common.tools.grpc.BaseEventCollector import BaseEventCollector +from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher -from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.grpc.Tools import grpc_message_to_json_string, grpc_message_to_json from context.client.ContextClient import ContextClient from simap_connector.service.telemetry.worker.data.Resources import ( ResourceLink, Resources, SyntheticSampler ) from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum -from simap_connector.service.telemetry.TelemetryPool import TelemetryPool -from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER -from .MockSimaps import delete_mock_simap, set_mock_simap -from .ObjectCache import CachedEntities, ObjectCache -from .SimapClient import SimapClient -from .Tools import get_device_endpoint, get_link_endpoint #, get_service_endpoint - +from simap_connector.service.telemetry.TelemetryPool import SynthesizerWorker, TelemetryPool +from .RealSimaps import set_simap_network, delete_simap_network +from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER, LINKS_CAPACITY +# from .MockSimaps import delete_mock_simap, set_mock_simap +from .ObjectCache import CachedEntities, ObjectCache +from .SimapClient import SimapClient +from .Tools import get_device_endpoint, get_link_endpoint, get_connection_endpoints_and_links #, get_service_endpoint LOGGER = logging.getLogger(__name__) @@ -51,16 +51,16 @@ SKIPPED_DEVICE_TYPES = { class EventDispatcher(BaseEventDispatcher): def __init__( self, events_queue : queue.PriorityQueue, - simap_client : SimapClient, - context_client : ContextClient, - telemetry_pool : TelemetryPool, - terminate : 
Optional[threading.Event] = None + simap_client : SimapClient, + context_client : ContextClient, + telemetry_pool : TelemetryPool, + terminate : Optional[threading.Event] = None ) -> None: super().__init__(events_queue, terminate) - self._simap_client = simap_client - self._context_client = context_client - self._telemetry_pool = telemetry_pool - self._object_cache = ObjectCache(self._context_client) + self._simap_client = simap_client + self._context_client = context_client + self._telemetry_pool = telemetry_pool + self._object_cache = ObjectCache(self._context_client) self._skipped_devices : Set[str] = set() @@ -357,31 +357,19 @@ class EventDispatcher(BaseEventDispatcher): te_link = te_topo.link(link_name) te_link.update(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name) - worker_name = '{:s}:{:s}'.format(topology_name, link_name) - resources = Resources() - resources.links.append(ResourceLink( - domain_name=topology_name, link_name=link_name, - bandwidth_utilization_sampler=SyntheticSampler.create_random( - amplitude_scale = 25.0, - phase_scale = 1e-7, - period_scale = 86_400, - offset_scale = 25, - noise_ratio = 0.05, - min_value = 0.0, - max_value = 100.0, - ), - latency_sampler=SyntheticSampler.create_random( - amplitude_scale = 0.5, - phase_scale = 1e-7, - period_scale = 60.0, - offset_scale = 10.0, - noise_ratio = 0.05, - min_value = 0.0, - ), - related_service_ids=[], - )) - sampling_interval = 1.0 - self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) + # worker_name = '{:s}:{:s}'.format(topology_name, link_name) + # resources = Resources() + # resources.links.append(ResourceLink( + # domain_name = topology_name, + # link_name = link_name, + # metrics_sampler = SyntheticSampler.create_random( + # connection_count = 0, + # link_capacity = LINKS_CAPACITY.get(link_name, 100.0) + # ), + # related_service_ids=[], + # )) + # sampling_interval = 1.0 + # self._telemetry_pool.start_synthesizer(worker_name, 
resources, sampling_interval) return True @@ -448,8 +436,8 @@ class EventDispatcher(BaseEventDispatcher): self._object_cache.delete(CachedEntities.LINK, link_uuid) self._object_cache.delete(CachedEntities.LINK, link_name) - worker_name = '{:s}:{:s}'.format(topology_name, link_name) - self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + # worker_name = '{:s}:{:s}'.format(topology_name, link_name) + # self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) MSG = 'Link Removed: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) @@ -491,17 +479,17 @@ class EventDispatcher(BaseEventDispatcher): # LOGGER.warning(MSG.format(str_service_event, str_service)) # return False - topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) - topology_names = {t.name for t in topologies} - topology_names.discard(DEFAULT_TOPOLOGY_NAME) - if len(topology_names) != 1: - MSG = 'ServiceEvent({:s}) skipped, unable to identify on which topology to insert it' - str_service_event = grpc_message_to_json_string(service_event) - LOGGER.warning(MSG.format(str_service_event)) - return False + # topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) + # topology_names = {t.name for t in topologies} + # topology_names.discard(DEFAULT_TOPOLOGY_NAME) + # if len(topology_names) != 1: + # MSG = 'ServiceEvent({:s}) skipped, unable to identify on which topology to insert it' + # str_service_event = grpc_message_to_json_string(service_event) + # LOGGER.warning(MSG.format(str_service_event)) + # return False - domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net - set_mock_simap(self._simap_client, domain_name) + # domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net + # set_mock_simap(self._simap_client, domain_name) #domain_topo = self._simap_client.network(domain_name) #domain_topo.update() @@ -611,7 +599,7 @@ class EventDispatcher(BaseEventDispatcher): return domain_name = 
topology_names.pop() # trans-pkt/agg-net/e2e-net - delete_mock_simap(self._simap_client, domain_name) + # delete_mock_simap(self._simap_client, domain_name) #domain_topo = self._simap_client.network(domain_name) #domain_topo.update() @@ -663,57 +651,322 @@ class EventDispatcher(BaseEventDispatcher): MSG = 'Processing Connection Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(connection_event))) - # Here a connection object from context is received in connection_event. - # Here is gRPC message definition: message Connection { ConnectionId connection_id = 1; ServiceId service_id = 2; repeated EndPointId path_hops_endpoint_ids = 3; repeated ServiceId sub_service_ids = 4; ConnectionSettings settings = 5;} - # discard sub_service_ids and settings for now, as not used in SIMAP population. - # Extract service_id, endpoint_ids from connection_event to identify the connection. - # Get all links using gRPC ListLinkIds() from context, and find which link(s) correspond to the connection's endpoint_ids. - # Then update SIMAP accordingly. - # Then, do this only for connections that correspond to links that this controller is allowed to manage, as per ALLOWED_LINKS_PER_CONTROLLER. 
- # Then, do something like this (pseudocode): - # worker_name = '{:s}:{:s}'.format(topology_name, link_name) - # resources = Resources() - # resources.links.append(ResourceLink( - # domain_name=topology_name, link_name=link_name, - # bandwidth_utilization_sampler=SyntheticSampler.create_random( - # amplitude_scale = 25.0, - # phase_scale = 1e-7, - # period_scale = 86_400, - # offset_scale = 25, - # noise_ratio = 0.05, - # min_value = 0.0, - # max_value = 100.0, - # ), - # latency_sampler=SyntheticSampler.create_random( - # amplitude_scale = 0.5, - # phase_scale = 1e-7, - # period_scale = 60.0, - # offset_scale = 10.0, - # noise_ratio = 0.05, - # min_value = 0.0, - # ), - # related_service_ids=[], - # )) - # sampling_interval = 1.0 - # self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) + # Extract connection UUID from event + connection_uuid = connection_event.connection_id.connection_uuid.uuid + + # Clean up any stale mapping for this connection (e.g., if connection is being re-created) + old_mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) + if old_mapping is not None and isinstance(old_mapping, dict) and 'domain' in old_mapping: + self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid) + LOGGER.debug('Removed stale mapping for connection {:s} before processing'.format(connection_uuid)) + + try: + # Use common helper to prepare connection data + result = self._prepare_connection_processing(connection_uuid) + if result is None: + return False + (domain_name, processed_links) = result + + # Update telemetry for each link involved in this connection + for link_uuid, link_name, link_topology_name in processed_links: + # Count active connections on this link + active_conn_count = self._count_active_connections(link_uuid, domain_name) + LOGGER.info('Connection {:s} uses allowed link: {:s} (uuid: {:s})'.format(connection_uuid, link_name, link_uuid)) + worker_name = 
'{:s}:{:s}'.format(link_topology_name, link_name) + + # --- TEMPORARY: Check for special triggering rules for L6 in trans-pkt domain --- + if link_name == "L6": + # Check for special triggering rules (e.g., L6 triggers L3 and L13) + triggered_links = self._check_and_trigger_additional_links( + link_topology_name, active_conn_count) + # Update the cached mapping to include triggered links + if triggered_links: + mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) + if mapping and isinstance(mapping, dict): + mapping['triggered_links'] = triggered_links + self._object_cache.set(CachedEntities.CONNECTION, mapping, connection_uuid) + LOGGER.debug('Updated connection {:s} mapping with {:d} triggered links'.format( + connection_uuid, len(triggered_links))) + # --- END OF TEMPORARY LOGIC --- + + # Worker should already exist from _dispatch_link_set (link creation event) + if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): + LOGGER.warning('Worker not found for link {:s}, creating and starting new worker'.format(link_name)) + + # Create worker with same parameters as in _dispatch_link_set + resources = Resources() + resources.links.append(ResourceLink( + domain_name = link_topology_name, + link_name = link_name, + metrics_sampler = SyntheticSampler.create_random( + connection_count = active_conn_count, + link_capacity = LINKS_CAPACITY.get(link_name, 100.0) + ), + related_service_ids=[], # TODO: populate with actual related services if needed (later) + )) + sampling_interval = 1.0 + self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) + LOGGER.info('Started new synthesizer worker: {:s}'.format(worker_name)) + else: + # Worker exists, update connection count for congestion simulation + worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + assert isinstance(worker, SynthesizerWorker), \ + 'Expected SynthesizerWorker, got
{:s}'.format(type(worker).__name__) + + worker.change_resources(active_conn_count) + LOGGER.info('Updated telemetry of already running worker: link {:s}, connection_count={:d}'.format( + link_name, active_conn_count)) + + except Exception as e: + LOGGER.exception('Failed to process connection event {:s}: {:s}'.format(connection_uuid, str(e))) + return False return True + # TEMPORARY: This function implements the special triggering rules for L6 in trans-pkt domain. + def _check_and_trigger_additional_links( + self, link_topology_name: str, active_conn_count: int + ) -> List[Tuple[str, str, str]]: + """ + Check for special triggering rules and start additional workers. + + Rule: When L6 is processed in trans-pkt domain, also start workers for L3 and L13. + + Args: + link_topology_name: Topology name of the L6 link, reused to name the triggered workers + active_conn_count: Active connection count used to seed the triggered samplers + + Returns: + List of triggered links with format: (link_uuid, link_name, topology_name) + """ + triggered_links = [] + + # Trigger workers for L3 and L13 using same topology as L6 + for link_name in ['L3', 'L13']: + # Generate UUID for the triggered link + link_uuid = str(uuid.uuid4()) + worker_name = '{:s}:{:s}'.format(link_topology_name, link_name) + + LOGGER.info('Triggering worker for link {:s} (generated uuid: {:s})'.format(link_name, link_uuid)) + + # Check if worker already exists + if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): + # Create and start worker + resources = Resources() + resources.links.append(ResourceLink( + domain_name = link_topology_name, + link_name = link_name, + metrics_sampler = SyntheticSampler.create_random( + connection_count = active_conn_count, + link_capacity = LINKS_CAPACITY.get(link_name, 100.0) + ), + related_service_ids = [], + )) + sampling_interval = 1.0 + self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) + 
LOGGER.info('Started triggered synthesizer worker: {:s}'.format(worker_name)) + else: + LOGGER.info('Worker {:s} already exists, skipping creation'.format(worker_name)) + + triggered_links.append((link_uuid, link_name, link_topology_name)) + + return triggered_links + + + def _prepare_connection_processing(self, connection_uuid: str): + """ + Extract common logic for processing connection events. + + Returns: + Tuple of ( domain_name, processed_links) or None if failed + """ + # Get the connection object + connection = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid) + if connection is None: + LOGGER.warning('Connection {:s} not found in cache'.format(connection_uuid)) + return None + + MSG = 'Processing Connection: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(connection))) + + _, link_uuids = get_connection_endpoints_and_links(connection_uuid) + + # Determine the controller's domain name (network_id) + topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) + topology_names = {t.name for t in topologies} + topology_names.discard(DEFAULT_TOPOLOGY_NAME) + if len(topology_names) != 1: + LOGGER.warning('Unable to identify self-controller for connection {:s} and {!r}'.format(connection_uuid, topology_names)) + return None + domain_name = topology_names.pop() + + # Call set_simap_network with proper parameters + network_connection = grpc_message_to_json(connection) + set_simap_network(self._context_client, self._simap_client, domain_name, network_connection) + LOGGER.info('Set SIMAP network for connection {:s} in domain {:s}'.format(connection_uuid, domain_name)) + + # Filter links based on ALLOWED_LINKS_PER_CONTROLLER + allowed_link_names = ALLOWED_LINKS_PER_CONTROLLER.get(domain_name, set()) + LOGGER.debug('Allowed links for domain {:s}: {:s}'.format(domain_name, str(allowed_link_names))) + processed_links = [] + for link_uuid in link_uuids: + link = self._object_cache.get(CachedEntities.LINK, link_uuid) + if 
link.name in allowed_link_names: + # Get the link's topology for worker naming + link_topology_uuid, _ = get_link_endpoint(link) + link_topology = self._object_cache.get(CachedEntities.TOPOLOGY, link_topology_uuid) + processed_links.append((link_uuid, link.name, link_topology.name)) + + if not processed_links: + LOGGER.debug('Connection {:s} has no allowed links for domain {:s}'.format( + connection_uuid, domain_name)) + self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid) + return None + + # Cache the connection-to-links mapping for later retrieval (e.g., during REMOVE events) + mapping = { + 'domain': domain_name, + 'links': {link_uuid: {'name': link_name, 'topology': link_topo_name} for link_uuid, link_name, link_topo_name in processed_links}, + 'triggered_links': [] # Will store additional links triggered by special rules + } + self._object_cache.set(CachedEntities.CONNECTION, mapping, connection_uuid) + LOGGER.debug('Cached connection {:s} mapping with {:d} links for domain {:s}'.format( + connection_uuid, len(processed_links), domain_name)) + + return domain_name, processed_links + + + def _count_active_connections(self, link_uuid: str, domain_name: str, ) -> int: + """ + Count active connections using a specific link. 
+ + Args: + link_uuid: UUID of the link to count connections for + domain_name: Domain name to filter connections + Returns: + int: Number of active connections using this link + """ + + all_cached_connections = self._object_cache.get_all(CachedEntities.CONNECTION, fresh=False) + active_count = 0 + for cached_obj in all_cached_connections: + if not isinstance(cached_obj, dict) or 'domain' not in cached_obj or 'links' not in cached_obj: + continue + + if cached_obj['domain'] != domain_name: + continue + + if link_uuid in cached_obj['links']: + active_count += 1 + + LOGGER.info('Active connection count on link {:s} in domain {:s}: {:d}'.format( + link_uuid, domain_name, active_count)) + return active_count + + def dispatch_connection_create(self, connection_event : ConnectionEvent) -> None: if not self.dispatch_connection_set(connection_event): return MSG = 'Skipping Connection Create Event: {:s}' LOGGER.debug(MSG.format(grpc_message_to_json_string(connection_event))) - + + def dispatch_connection_update(self, connection_event : ConnectionEvent) -> None: if not self.dispatch_connection_set(connection_event): return MSG = 'Skipping Connection Update Event: {:s}' LOGGER.debug(MSG.format(grpc_message_to_json_string(connection_event))) - + + def dispatch_connection_remove(self, connection_event : ConnectionEvent) -> None: - MSG = 'Skipping Connection Remove Event: {:s}' - LOGGER.debug(MSG.format(grpc_message_to_json_string(connection_event))) + MSG = 'Processing Connection Remove Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(connection_event))) + + connection_uuid = connection_event.connection_id.connection_uuid.uuid + + try: + mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) + LOGGER.info('Retrieved mapping for connection {:s}: {:s}'.format(connection_uuid, str(mapping))) + if mapping is None: + MSG = 'Connection {:s} not managed by this controller (not in allowed links), skipping removal' + 
LOGGER.debug(MSG.format(connection_uuid)) + return + + # Defensive: distinguish mapping dicts from potential protobuf Connection objects + elif not isinstance(mapping, dict) or 'domain' not in mapping or 'links' not in mapping: + MSG = 'Invalid mapping structure for connection {:s}: expected dict with domain/links keys' + raise Exception(MSG.format(connection_uuid)) + + # Extract domain and links from cached mapping + domain_name = mapping['domain'] + link_uuids_dict = mapping['links'] + processed_links = [(link_uuid, link_data['name'], link_data['topology']) for link_uuid, link_data in link_uuids_dict.items()] + + LOGGER.info('Retrieved cached mapping for connection {:s}: domain={:s}, links={:d}'.format( + connection_uuid, domain_name, len(processed_links))) + + # Delete the connection from cache first (we already have the mapping) + self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid) + LOGGER.debug('Deleted cached mapping for connection {:s}'.format(connection_uuid)) + + # Process each link: count remaining connections and stop/update worker accordingly + all_links_stopped = True # Track if all links have been stopped + for link_uuid, link_name, link_topology_name in processed_links: + worker_name = '{:s}:{:s}'.format(link_topology_name, link_name) + + if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): + LOGGER.warning('Worker not found for link {:s}, skipping telemetry update for connection removal'.format(link_name)) + continue + + # Count remaining connections on this link (now excluding the deleted one) + remaining_conn_count = self._count_active_connections(link_uuid, domain_name) + + if remaining_conn_count == 0: + # No other connections use this link, stop the worker + self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + LOGGER.info('Stopped telemetry worker for link {:s}, no connections remain'.format(link_name)) + + # ---- TEMPORARY: Stop triggered links (L3 and L13 when L6 is removed 
from trans-pkt) ---- + if link_name == "L6": + try: + triggered_links = mapping.get('triggered_links', []) + if triggered_links: + LOGGER.info('Connection {:s} has {:d} triggered links to clean up'.format( + connection_uuid, len(triggered_links))) + + for _, trig_link_name, trig_link_topology_name in triggered_links: + trig_worker_name = '{:s}:{:s}'.format(trig_link_topology_name, trig_link_name) + + if self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, trig_worker_name): + self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, trig_worker_name) + LOGGER.info('Stopped triggered telemetry worker for link {:s}'.format(trig_link_name)) + else: + LOGGER.warning('Triggered worker {:s} not found during cleanup'.format(trig_worker_name)) + except Exception as e: + LOGGER.exception('Failed to stop triggered links for connection {:s}: {:s}'.format(connection_uuid, str(e))) + # ---- END OF TEMPORARY LOGIC ---- + else: + # Other connections still use this link, update worker with new count + all_links_stopped = False + worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + assert isinstance(worker, SynthesizerWorker), \ + 'Expected SynthesizerWorker, got {:s}'.format(type(worker).__name__) + + worker.change_resources(remaining_conn_count) + LOGGER.info('Updated telemetry for link {:s} after connection removal, {:d} connections remain'.format( + link_name, remaining_conn_count)) + + # Delete SIMAP network only if all links have been stopped + if all_links_stopped: + delete_simap_network(self._simap_client, domain_name) + LOGGER.info('Deleted SIMAP network for domain {:s} after all links stopped'.format(domain_name)) + else: + LOGGER.debug('SIMAP network {:s} retained, some links still have active connections'.format(domain_name)) + + except Exception as e: + LOGGER.exception('Failed to process connection removal {:s}: {:s}'.format( + connection_uuid, str(e))) class SimapUpdater: diff --git 
a/src/simap_connector/service/simap_updater/Tools.py b/src/simap_connector/service/simap_updater/Tools.py index d420f24e9b3714939b2900155c0a1bba2d350a90..1c5c3e092e2792876414b48634abd9642218a481 100644 --- a/src/simap_connector/service/simap_updater/Tools.py +++ b/src/simap_connector/service/simap_updater/Tools.py @@ -16,11 +16,12 @@ import enum from typing import List, Optional, Set, Tuple, Union from common.proto.context_pb2 import ( - EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, - DeviceEvent, Link, LinkEvent, Service, ServiceEvent, SliceEvent, TopologyEvent + EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, DeviceEvent, + Link, LinkEvent, Service, ServiceEvent, SliceEvent, TopologyEvent, Empty ) from common.tools.grpc.Tools import grpc_message_to_json_string - +from context.client.ContextClient import ContextClient +from common.tools.context_queries.Connection import get_connection_by_uuid class EventTypeEnum(enum.IntEnum): CREATE = EVENTTYPE_CREATE @@ -160,3 +161,64 @@ def get_service_endpoint(service : Service) -> Tuple[Optional[str], List[Tuple[s raise Exception(MSG.format(str(e), grpc_message_to_json_string(service))) from e return topology_uuid, endpoint_uuids + + +def get_connection_endpoints_and_links(connection_id: str) -> Tuple[List[Tuple[str, str]], List[str]]: + """ + Retrieve connection details and identify associated links. 
+ Args: + connection_id: UUID string of the connection + Returns: + Tuple of: + - List of endpoint tuples (device_uuid, endpoint_uuid) in path order + - List of link UUIDs corresponding to consecutive endpoint pairs + """ + + context_client = ContextClient() + connection = get_connection_by_uuid(context_client, connection_id, rw_copy=False) + + if connection is None: + raise Exception(f"Failed to retrieve Connection({connection_id}): Connection not found") + + # Extract path_hops_endpoint_ids + endpoint_ids = [] + for endpoint_id in connection.path_hops_endpoint_ids: + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + endpoint_ids.append((device_uuid, endpoint_uuid)) + + if len(endpoint_ids) < 2: + # No path or single endpoint - no links + return endpoint_ids, [] + + # Find links connecting consecutive endpoint pairs + # Get all links from context + + link_list = context_client.ListLinks(Empty()) + link_uuids = [] + + # For each consecutive pair of endpoints in the path + for i in range(len(endpoint_ids) - 1): + src_device_uuid, src_endpoint_uuid = endpoint_ids[i] + dst_device_uuid, dst_endpoint_uuid = endpoint_ids[i + 1] + + # Find link connecting these endpoints + for link in link_list.links: + if len(link.link_endpoint_ids) != 2: + continue + + # Extract link endpoints + link_ep0_device = link.link_endpoint_ids[0].device_id.device_uuid.uuid + link_ep0_endpoint = link.link_endpoint_ids[0].endpoint_uuid.uuid + link_ep1_device = link.link_endpoint_ids[1].device_id.device_uuid.uuid + link_ep1_endpoint = link.link_endpoint_ids[1].endpoint_uuid.uuid + + # Check if link matches (bidirectional check) + if ((link_ep0_device == src_device_uuid and link_ep0_endpoint == src_endpoint_uuid and + link_ep1_device == dst_device_uuid and link_ep1_endpoint == dst_endpoint_uuid) or + (link_ep1_device == src_device_uuid and link_ep1_endpoint == src_endpoint_uuid and + link_ep0_device == dst_device_uuid and link_ep0_endpoint == 
dst_endpoint_uuid)): + link_uuids.append(link.link_id.link_uuid.uuid) + break + + return endpoint_ids, link_uuids diff --git a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py index 075c3b6d6e5cda25f342b2814bd66b0e23fd812f..e4109790c5d56a610628c0008683e88667545fad 100644 --- a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py +++ b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py @@ -72,13 +72,17 @@ class AggregatorWorker(_Worker): def run(self) -> None: self._logger.info('[run] Starting...') + MSG = '[run] Aggregating link ({:s}, {:s}) every {:.1f}s' + self._logger.info(MSG.format( + self._network_id, self._link_id, self._sampling_interval + )) kafka_producer = KafkaProducer(bootstrap_servers=KAFKA_BOOT_SERVERS) update_counter = 1 try: while not self._stop_event.is_set() and not self._terminate.is_set(): - #self._logger.debug('[run] Aggregating...') + self._logger.debug('[run] Aggregation cycle #{:d}...'.format(update_counter)) link_sample = self._aggregation_cache.aggregate() @@ -110,6 +114,12 @@ class AggregatorWorker(_Worker): link_sample.bandwidth_utilization, link_sample.latency, related_service_ids=list(link_sample.related_service_ids) ) + + MSG = '[run] Updated SIMAP link ({:s}, {:s}): BW={:.2f}%, Latency={:.3f}ms' + self._logger.debug(MSG.format( + self._network_id, self._link_id, + link_sample.bandwidth_utilization, link_sample.latency + )) update_counter += 1 diff --git a/src/simap_connector/service/telemetry/worker/CollectorWorker.py b/src/simap_connector/service/telemetry/worker/CollectorWorker.py index 27b665d05d487fd165f78a00722af72222bc9ef2..0827a3f8ce91e2c35877be79816b4b166cddf07e 100644 --- a/src/simap_connector/service/telemetry/worker/CollectorWorker.py +++ b/src/simap_connector/service/telemetry/worker/CollectorWorker.py @@ -14,6 +14,7 @@ import json, math, requests, threading, time +from requests.auth import HTTPBasicAuth from 
requests.exceptions import ReadTimeout from typing import Optional from .data.AggregationCache import AggregationCache, LinkSample @@ -31,6 +32,7 @@ CONTROLLER_TO_ADDRESS_PORT = { WAIT_LOOP_GRANULARITY = 0.5 +AUTH = HTTPBasicAuth('admin', 'admin') class CollectorWorker(_Worker): def __init__( @@ -73,7 +75,7 @@ class CollectorWorker(_Worker): # NOTE: Trick: we set 1-second read_timeout to force the loop to give control # back and be able to check termination events. # , timeout=(10, 1) - with session.get(stream_url, stream=True) as reply: + with session.get(stream_url, stream=True, auth=AUTH) as reply: reply.raise_for_status() it_lines = reply.iter_lines(decode_unicode=True, chunk_size=1024) @@ -140,7 +142,7 @@ class CollectorWorker(_Worker): MSG = '[direct_simap_polling] Requesting "{:s}"...' self._logger.info(MSG.format(str(simap_url))) - with requests.get(simap_url, timeout=10) as reply: + with requests.get(simap_url, timeout=10, auth=AUTH) as reply: reply.raise_for_status() data = reply.json() diff --git a/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py index 884a2cff8c794eab325a4d527460e087820420c4..e8007052714ae23dbd6e75b2a00dd778d772be8c 100644 --- a/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py +++ b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py @@ -34,11 +34,10 @@ class SynthesizerWorker(_Worker): self._resources = resources self._sampling_interval = sampling_interval - def change_resources(self, bandwidth_factor : float, latency_factor : float) -> None: + def change_resources(self, connection_count: int) -> None: with self._lock: for link in self._resources.links: - link.bandwidth_utilization_sampler.offset *= bandwidth_factor - link.latency_sampler.offset *= latency_factor + link.metrics_sampler.connection_count = connection_count def run(self) -> None: self._logger.info('[run] Starting...') diff --git 
a/src/simap_connector/service/telemetry/worker/_Worker.py b/src/simap_connector/service/telemetry/worker/_Worker.py index ae0da4fc78e076b9887f8b9164d5d1066dd7f7b5..e6fe4f1fb1f6167c4c1d0db7a45cac2b7de2ed85 100644 --- a/src/simap_connector/service/telemetry/worker/_Worker.py +++ b/src/simap_connector/service/telemetry/worker/_Worker.py @@ -35,12 +35,12 @@ class _Worker(threading.Thread): ) -> None: self._worker_type = worker_type self._worker_name = worker_name - self._worker_key = get_worker_key(worker_type, worker_name) + self._worker_key = get_worker_key(worker_type, worker_name) name = 'TelemetryWorker({:s})'.format(self._worker_key) super().__init__(name=name, daemon=True) - self._logger = logging.getLogger(name) - self._stop_event = threading.Event() - self._terminate = threading.Event() if terminate is None else terminate + self._logger = logging.getLogger(name) + self._stop_event = threading.Event() + self._terminate = threading.Event() if terminate is None else terminate @property def worker_type(self) -> WorkerTypeEnum: return self._worker_type diff --git a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py index 31a71d09634da3480d202cbaa4f3e20866deb154..7c71a8926bed4018cd52ac055c2a581fbaab30ae 100644 --- a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py +++ b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py @@ -13,10 +13,13 @@ # limitations under the License. 
-import threading +import logging, threading from dataclasses import dataclass, field -from datetime import datetime -from typing import Dict, Set, Tuple +from datetime import datetime, timezone +from typing import Dict, Optional, Set, Tuple + + +LOGGER = logging.getLogger(__name__) @dataclass @@ -40,18 +43,59 @@ class AggregationCache: def __init__(self) -> None: self._lock = threading.Lock() self._samples : Dict[Tuple[str, str], LinkSample] = dict() + self._last_valid_aggregation : Optional[AggregatedLinkSample] = None def update(self, link_sample : LinkSample) -> None: link_key = (link_sample.network_id, link_sample.link_id) with self._lock: self._samples[link_key] = link_sample + + MSG = '[update] Received sample for link ({:s}, {:s}): BW={:.2f}%, Latency={:.3f}ms, Services={:s}' + LOGGER.debug(MSG.format( + link_sample.network_id, link_sample.link_id, + link_sample.bandwidth_utilization, link_sample.latency, + str(link_sample.related_service_ids) + )) def aggregate(self) -> AggregatedLinkSample: with self._lock: - agg = AggregatedLinkSample(timestamp=datetime.utcnow()) - for sample in self._samples.values(): + num_samples = len(self._samples) + if num_samples > 0: + MSG = '[aggregate] Aggregating {:d} supporting link(s)' + LOGGER.info(MSG.format(num_samples)) + + if num_samples == 0: + if self._last_valid_aggregation is not None: + MSG = '[aggregate] No samples available, reusing last valid aggregation: BW={:.2f}%, Latency={:.3f}ms' + LOGGER.warning(MSG.format( + self._last_valid_aggregation.bandwidth_utilization, + self._last_valid_aggregation.latency + )) + # Return a copy with updated timestamp + return AggregatedLinkSample( + timestamp=datetime.now(timezone.utc), + bandwidth_utilization=self._last_valid_aggregation.bandwidth_utilization, + latency=self._last_valid_aggregation.latency, + related_service_ids=self._last_valid_aggregation.related_service_ids.copy() + ) + else: + MSG = '[aggregate] No samples available and no cached data, returning zeros' + 
LOGGER.warning(MSG) + return AggregatedLinkSample(timestamp=datetime.now(timezone.utc)) + + agg = AggregatedLinkSample(timestamp=datetime.now(timezone.utc)) + for link_key, sample in self._samples.items(): + network_id, link_id = link_key + + MSG = '[aggregate] - Link ({:s}, {:s}): BW={:.2f}%, Latency={:.3f}ms, Services={:s}' + LOGGER.debug(MSG.format( + network_id, link_id, + sample.bandwidth_utilization, sample.latency, + str(sample.related_service_ids) + )) + agg.bandwidth_utilization = max( agg.bandwidth_utilization, sample.bandwidth_utilization ) @@ -59,4 +103,14 @@ class AggregationCache: agg.related_service_ids = agg.related_service_ids.union( sample.related_service_ids ) + + if num_samples > 0: + MSG = '[aggregate] Result: BW={:.2f}% (max), Latency={:.3f}ms (sum), Services={:s}' + LOGGER.info(MSG.format( + agg.bandwidth_utilization, agg.latency, + str(agg.related_service_ids) + )) + # Cache this valid aggregation for future use + self._last_valid_aggregation = agg + return agg diff --git a/src/simap_connector/service/telemetry/worker/data/Resources.py b/src/simap_connector/service/telemetry/worker/data/Resources.py index 49c16c3404d5de650fcd13239eafcf87b4a98abc..2f3de063526548230107e692a98258dc31981cdd 100644 --- a/src/simap_connector/service/telemetry/worker/data/Resources.py +++ b/src/simap_connector/service/telemetry/worker/data/Resources.py @@ -27,8 +27,8 @@ class ResourceNode: related_service_ids : List[str] = field(default_factory=list) def generate_samples(self, simap_client : SimapClient) -> None: - cpu_utilization = self.cpu_utilization_sampler.get_sample() - simap_node = simap_client.network(self.domain_name).node(self.node_name) + cpu_utilization, _ = self.cpu_utilization_sampler.get_sample() + simap_node = simap_client.network(self.domain_name).node(self.node_name) simap_node.telemetry.update( cpu_utilization.value, related_service_ids=self.related_service_ids ) @@ -36,15 +36,13 @@ class ResourceNode: @dataclass class ResourceLink: - domain_name 
: str - link_name : str - bandwidth_utilization_sampler : SyntheticSampler - latency_sampler : SyntheticSampler - related_service_ids : List[str] = field(default_factory=list) + domain_name : str + link_name : str + metrics_sampler : SyntheticSampler # Single sampler for both BW and latency + related_service_ids : List[str] = field(default_factory=list) def generate_samples(self, simap_client : SimapClient) -> None: - bandwidth_utilization = self.bandwidth_utilization_sampler.get_sample() - latency = self.latency_sampler.get_sample() + bandwidth_utilization, latency = self.metrics_sampler.get_sample() simap_link = simap_client.network(self.domain_name).link(self.link_name) simap_link.telemetry.update( bandwidth_utilization.value, latency.value, diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index b942971282afa6818bab7008bd531fd6e9739cbf..1bcbbb9bd7f445b01f5c9892970c512ff9a52f65 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -13,51 +13,96 @@ # limitations under the License. -import math, random, sys, threading +import random, threading from dataclasses import dataclass, field from datetime import datetime -from typing import Dict, Optional +from typing import Dict, Optional, Tuple from .Sample import Sample @dataclass class SyntheticSampler: - amplitude : float = field(default=0.0) - phase : float = field(default=0.0) - period : float = field(default=1.0) - offset : float = field(default=0.0) - noise_ratio : float = field(default=0.0) - min_value : float = field(default=-sys.float_info.max) - max_value : float = field(default=sys.float_info.max) + """Simple sampler with temporal continuity - next values stay close to previous values. 
+ + Bandwidth ranges based on connection count: + 0 conns: avg=3%, range 1-10% + 1 conn: avg=25%, range 15-30% + 2 conns: avg=45%, range 35-55% + 3 conns: avg=65%, range 60-80% + 4+ conns: avg=85%, range 80-95% + + Latency uses bandwidth ranges divided by 10 (0-10ms): + 0 conns: avg=0.3ms, range 0.1-1.0ms + 1 conn: avg=2.5ms, range 1.5-3.0ms + 2 conns: avg=4.5ms, range 3.5-5.5ms + 3 conns: avg=6.5ms, range 6.0-8.0ms + 4+ conns: avg=8.5ms, range 8.0-9.5ms + + Values vary by ±1% between consecutive samples for temporal continuity. + """ + connection_count : int = field(default = 0) + link_capacity : float = field(default = 100.0) + prev_bw : Optional[float] = field(default = None) + prev_latency : Optional[float] = field(default = None) + + # Connection count to (avg, min, max) percentage mapping + # Latency uses same ranges divided by 10 (0-10ms range) + BW_RANGES = { + 0: (3, 5, 10), + 1: (25, 15, 30), + 2: (40, 35, 50), + 3: (60, 65, 80), + 4: (85, 80, 95), + } + LAT_RANGES = { + 0: (0.4, 0.1, 0.8), + 1: (1.4, 1.0, 1.8), + 2: (2.4, 2.0, 2.8), + 3: (3.4, 3.0, 3.8), + 4: (4.4, 4.0, 4.8), + } @classmethod def create_random( - cls, amplitude_scale : float, phase_scale : float, period_scale : float, - offset_scale : float, noise_ratio : float, - min_value : Optional[float] = None, max_value : Optional[float] = None + cls, + connection_count : int = 0, + link_capacity : float = 100.0 ) -> 'SyntheticSampler': - amplitude = amplitude_scale * random.random() - phase = phase_scale * random.random() - period = period_scale * random.random() - offset = offset_scale * random.random() + amplitude - if min_value is None: min_value = -sys.float_info.max - if max_value is None: max_value = sys.float_info.max - return cls(amplitude, phase, period, offset, noise_ratio, min_value, max_value) + """Factory method for compatibility (ignores unused parameters).""" + return cls(connection_count=connection_count, link_capacity=link_capacity) - def get_sample(self) -> Sample: - timestamp = 
datetime.timestamp(datetime.utcnow()) + def get_sample(self) -> Tuple[Sample, Sample]: + """Generate bandwidth and latency samples with temporal continuity. + + Returns: + Tuple of (bandwidth_sample, latency_sample) + """ + timestamp = datetime.now().timestamp() + conn_key = min(self.connection_count, 4) - waveform = math.sin(2 * math.pi * timestamp / self.period + self.phase) - waveform *= self.amplitude - waveform += self.offset - - noise = self.amplitude * random.random() - value = abs((1.0 - self.noise_ratio) * waveform + self.noise_ratio * noise) - - value = max(value, self.min_value) - value = min(value, self.max_value) - - return Sample(timestamp, 0, value) + avg, min_bw, max_bw = self.BW_RANGES[conn_key] + if self.prev_bw is None: + bw_utilization = avg + else: + noise_factor = random.uniform(-0.01, 0.01) # ±1% noise for bandwidth + bw_utilization = self.prev_bw * (1.0 + noise_factor) + + bw_utilization = max(min_bw, min(max_bw, bw_utilization)) + self.prev_bw = bw_utilization + + avg_lat, min_lat, max_lat = self.LAT_RANGES[conn_key] + if self.prev_latency is None: + latency = avg_lat + else: + noise_factor = random.uniform(-0.05, 0.05) # ±5% noise for latency + latency = self.prev_latency * (1.0 + noise_factor) + + latency = max(min_lat, min(max_lat, latency)) + self.prev_latency = latency + + # actual_bw_utilization = (bw_utilization / 100.0) * self.link_capacity + + return (Sample(timestamp, 0, bw_utilization), Sample(timestamp, 0, latency)) class SyntheticSamplers: @@ -66,22 +111,27 @@ class SyntheticSamplers: self._samplers : Dict[str, SyntheticSampler] = dict() def add_sampler( - self, sampler_name : str, amplitude_scale : float, phase_scale : float, - period_scale : float, offset_scale : float, noise_ratio : float + self, sampler_name : str, + connection_count : int = 0, + link_capacity : float = 100.0 ) -> None: with self._lock: if sampler_name in self._samplers: MSG = 'SyntheticSampler({:s}) already exists' raise Exception(MSG.format(sampler_name)) 
self._samplers[sampler_name] = SyntheticSampler.create_random( - amplitude_scale, phase_scale, period_scale, offset_scale, noise_ratio + connection_count=connection_count, + link_capacity=link_capacity ) def remove_sampler(self, sampler_name : str) -> None: with self._lock: self._samplers.pop(sampler_name, None) - def get_sample(self, sampler_name : str) -> Sample: + def get_sample(self, sampler_name : str) -> Tuple[Sample, Sample]: + """Get both bandwidth and latency samples. + Returns: Tuple of (bandwidth_sample, latency_sample) + """ with self._lock: sampler = self._samplers.get(sampler_name) if sampler_name not in self._samplers: diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json index 786a6df35d4a1623311a40c7357b77b25a07e2b7..ef050bbacab0a19b7909c783cf9634acb3b46633 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json @@ -115,4 +115,4 @@ } } ] -} \ No newline at end of file +} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json index f0875e25ea758d6c43866410dcbb720644da1aed..2bc13b12e3fc7ccc785f94a729144c66659e824c 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json @@ -115,4 +115,4 @@ } } ] -} \ No newline at end of file +} diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy.sh b/src/tests/ecoc25-f5ga-telemetry/deploy.sh index 4bdf8715d9826b9d609c2716d569fd9b47226065..66a6f6ffbe370f1a22aa27ac374d4515da899c5d 100755 --- a/src/tests/ecoc25-f5ga-telemetry/deploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy.sh @@ -14,7 +14,7 @@ # limitations under the License. 
-# Assuming the instances are named as: simap-server, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl # Get the current hostname HOSTNAME=$(hostname) @@ -22,10 +22,10 @@ echo "Deploying in ${HOSTNAME}..." case "$HOSTNAME" in - simap-server) - echo "Building SIMAP Server..." + simap-datastore) + echo "Building SIMAP DataStore..." cd ~/tfs-ctrl/ - docker buildx build -t simap-server:mock -f ./src/tests/tools/simap_server/Dockerfile . + docker buildx build -t simap-datastore:mock -f ./src/tests/tools/simap_datastore/Dockerfile . echo "Building NCE-FAN Controller..." cd ~/tfs-ctrl/ @@ -40,13 +40,13 @@ case "$HOSTNAME" in docker buildx build -t traffic-changer:mock -f ./src/tests/tools/traffic_changer/Dockerfile . echo "Cleaning up..." - docker rm --force simap-server + docker rm --force simap-datastore docker rm --force nce-fan-ctrl docker rm --force nce-t-ctrl docker rm --force traffic-changer echo "Deploying support services..." - docker run --detach --name simap-server --publish 8080:8080 simap-server:mock + docker run --detach --name simap-datastore --publish 8080:8080 simap-datastore:mock docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock docker run --detach --name traffic-changer --publish 8083:8080 traffic-changer:mock diff --git a/src/tests/ecoc25-f5ga-telemetry/destroy.sh b/src/tests/ecoc25-f5ga-telemetry/destroy.sh index 47977562d859ecc85c3a56eebe483d3843769dd9..52cbd1353bfe1813958fc25192bb14cce175efb0 100755 --- a/src/tests/ecoc25-f5ga-telemetry/destroy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/destroy.sh @@ -14,7 +14,7 @@ # limitations under the License. 
-# Assuming the instances are named as: simap-server, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl # Get the current hostname HOSTNAME=$(hostname) @@ -22,9 +22,9 @@ echo "Destroying in ${HOSTNAME}..." case "$HOSTNAME" in - simap-server) + simap-datastore) echo "Cleaning up..." - docker rm --force simap-server + docker rm --force simap-datastore docker rm --force nce-fan-ctrl docker rm --force nce-t-ctrl docker rm --force traffic-changer diff --git a/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh new file mode 100755 index 0000000000000000000000000000000000000000..62d3c587d655000cd0cb0cb83aed94d4192d4ba7 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Set working directory +cd "$(dirname "$0")" || exit 1 + +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl + +# Get the current hostname +HOSTNAME=$(hostname) +echo "Collecting logs for ${HOSTNAME}..." + +rm logs -rf tmp/exec +mkdir -p tmp/exec + +case "$HOSTNAME" in + simap-datastore) + echo "Collecting Docker container logs..." 
+ docker logs simap-datastore > tmp/exec/simap-datastore.log 2>&1 + docker logs nce-fan-ctrl > tmp/exec/nce-fan-ctrl.log 2>&1 + docker logs nce-t-ctrl > tmp/exec/nce-t-ctrl.log 2>&1 + docker logs traffic-changer > tmp/exec/traffic-changer.log 2>&1 + ;; + tfs-e2e-ctrl) + echo "Collecting TFS E2E Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/e2e-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/e2e-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/e2e-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/e2e-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/e2e-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/e2e-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/e2e-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/e2e-simap-connector.log + ;; + tfs-agg-ctrl) + echo "Collecting TFS Aggregation Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/agg-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/agg-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/agg-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/agg-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/agg-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/agg-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/agg-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/agg-simap-connector.log + ;; + tfs-ip-ctrl) + echo "Collecting TFS IP Controller logs..." 
+ kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/ip-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/ip-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/ip-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/ip-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/ip-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/ip-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/ip-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/ip-simap-connector.log + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "No logs to collect." + ;; +esac + +printf "\n" + +echo "Done!" diff --git a/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py b/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py index 86ee09dab9f2a76f41b710704a38567694a01fe7..559556829ba75396d865f05aae6bba40c71cb477 100644 --- a/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py +++ b/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py @@ -64,7 +64,7 @@ def main() -> None: stream_url = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, subscription_uri) print('Opening stream "{:s}" (press Ctrl+C to stop)...'.format(stream_url)) - with requests.get(stream_url, stream=True) as resp: + with requests.get(stream_url, stream=True, auth=auth) as resp: for line in resp.iter_lines(decode_unicode=True): print(line) diff --git a/src/tests/mwc26-f5ga/data/slices/l3vpn_request_from_agg.json b/src/tests/mwc26-f5ga/data/slices/l3vpn_request_from_agg.json new file mode 100644 index 0000000000000000000000000000000000000000..ba9c9d853638f38442d21c44b02fec183d46df61 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/l3vpn_request_from_agg.json @@ -0,0 +1,185 @@ +{ + "ietf-l3vpn-svc:l3vpn-svc": 
{ + "sites": { + "site": [ + { + "devices": { + "device": [ + { + "device-id": "P-PE1", + "location": "access" + } + ] + }, + "locations": { + "location": [ + { + "location-id": "access" + } + ] + }, + "management": { + "type": "ietf-l3vpn-svc:provider-managed" + }, + "routing-protocols": { + "routing-protocol": [ + { + "static": { + "cascaded-lan-prefixes": { + "ipv4-lan-prefixes": [ + { + "lan": "172.1.101.22/24", + "lan-tag": "21", + "next-hop": "128.32.44.254" + } + ] + } + }, + "type": "ietf-l3vpn-svc:static" + } + ] + }, + "site-id": "site_access", + "site-network-accesses": { + "site-network-access": [ + { + "device-reference": "P-PE1", + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "customer-address": "128.32.44.254", + "prefix-length": "24", + "provider-address": "128.32.44.254" + } + } + }, + "service": { + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "bandwidth": { + "guaranteed-bw-percent": 100 + }, + "class-id": "qos-realtime", + "direction": "ietf-l3vpn-svc:both", + "latency": { + "latency-boundary": 20 + } + } + ] + } + } + }, + "svc-input-bandwidth": 1000000000, + "svc-mtu": 1500, + "svc-output-bandwidth": 5000000000 + }, + "site-network-access-id": "200", + "site-network-access-type": "ietf-l3vpn-svc:multipoint", + "vpn-attachment": { + "site-role": "ietf-l3vpn-svc:hub-role", + "vpn-id": "slice25" + } + } + ] + } + }, + { + "devices": { + "device": [ + { + "device-id": "P-PE2", + "location": "cloud" + } + ] + }, + "locations": { + "location": [ + { + "location-id": "cloud" + } + ] + }, + "management": { + "type": "ietf-l3vpn-svc:provider-managed" + }, + "routing-protocols": { + "routing-protocol": [ + { + "static": { + "cascaded-lan-prefixes": { + "ipv4-lan-prefixes": [ + { + "lan": "172.16.104.221/24", + "lan-tag": "201", + "next-hop": "172.10.44.254" + } + ] + } + }, + "type": "ietf-l3vpn-svc:static" + } + ] + }, + "site-id": "site_cloud", + 
"site-network-accesses": { + "site-network-access": [ + { + "device-reference": "P-PE2", + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "customer-address": "172.10.44.254", + "prefix-length": "24", + "provider-address": "172.10.44.254" + } + } + }, + "service": { + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "bandwidth": { + "guaranteed-bw-percent": 100 + }, + "class-id": "qos-realtime", + "direction": "ietf-l3vpn-svc:both", + "latency": { + "latency-boundary": 10 + } + } + ] + } + } + }, + "svc-input-bandwidth": 5000000000, + "svc-mtu": 1500, + "svc-output-bandwidth": 1000000000 + }, + "site-network-access-id": "200", + "site-network-access-type": "ietf-l3vpn-svc:multipoint", + "vpn-attachment": { + "site-role": "ietf-l3vpn-svc:spoke-role", + "vpn-id": "slice25" + } + } + ] + } + } + ] + }, + "vpn-services": { + "vpn-service": [ + { + "vpn-id": "slice25" + } + ] + } + } +} diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json new file mode 100644 index 0000000000000000000000000000000000000000..fd63bbabb1e51d31fc27fd5f964c005249c52b2a --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "initial_background_slice_1", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": 
"ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": 
"milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice2_game_creation.json b/src/tests/mwc26-f5ga/data/slices/network-slice2_game_creation.json new file mode 100644 index 0000000000000000000000000000000000000000..de69d29f92d8c7eeb1027a9deb0d61b781edb688 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice2_game_creation.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "game_slice_on_ip_transport", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + 
{"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json new file mode 100644 index 
0000000000000000000000000000000000000000..e76be75697b67c288aac35ae9e3d94f5bee029a1 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "another_background_slice_3", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + 
"id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice4_optical.json b/src/tests/mwc26-f5ga/data/slices/network-slice4_optical.json new file mode 100644 index 0000000000000000000000000000000000000000..54fd5b2a01c1afa35c04d4d4f7890e5fb5e29f62 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice4_optical.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "game_slice_on_optical_transport", + "description": "network slice 2, PC1-VM2 - using optical transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["31"]}, + {"type": 
"ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.201.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line2" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP1", + "sdp-ip-address": ["172.16.204.220"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.201.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line2" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP1 to VM2", + "description": "AC POP1 connected to VM2", + "ac-node-id": "POP1", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line2", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "7000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + 
"percentile-value": "0.001" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "4000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] + } + } + ] +} diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice5_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice5_background.json new file mode 100644 index 0000000000000000000000000000000000000000..2b4c1999acee3d0840f93e0b9322b630c3f235aa --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice5_background.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "another_background_slice_5", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + 
"service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git 
a/src/tests/mwc26-f5ga/data/telemetry/subscription-slice1.json b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice1.json new file mode 100644 index 0000000000000000000000000000000000000000..3a2c4b96c8daaa282999ccc68916d77124f1294a --- /dev/null +++ b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice1.json @@ -0,0 +1,9 @@ +{ + "ietf-subscribed-notifications:input": { + "datastore": "operational", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e/ietf-network-topology:link=E2E-L1/simap-telemetry", + "ietf-yang-push:periodic": { + "ietf-yang-push:period": 10 + } + } +} diff --git a/src/tests/mwc26-f5ga/data/telemetry/subscription-slice2.json b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice2.json new file mode 100644 index 0000000000000000000000000000000000000000..cd0954ac1f95f99fd8a4d4174819fdb9edca99e8 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice2.json @@ -0,0 +1,9 @@ +{ + "ietf-subscribed-notifications:input": { + "datastore": "operational", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e/ietf-network-topology:link=E2E-L2/simap-telemetry", + "ietf-yang-push:periodic": { + "ietf-yang-push:period": 10 + } + } +} diff --git a/src/tests/mwc26-f5ga/data/topology/topology-agg.json b/src/tests/mwc26-f5ga/data/topology/topology-agg.json new file mode 100644 index 0000000000000000000000000000000000000000..c761a86dd4bfc3865b03a70d5f4fb86d291a283b --- /dev/null +++ b/src/tests/mwc26-f5ga/data/topology/topology-agg.json @@ -0,0 +1,95 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "agg"}}} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", + "device_drivers": 
["DEVICEDRIVER_IETF_L3VPN"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.12"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "device_type": "nce", + "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8082"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "POP1"}}, "device_type": "packet-pop", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.204.220"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "201", "name": "201", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": 
"/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.1.201.1", "address_prefix": "24", + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[201]", "resource_value": { + "uuid": "201", "name": "201", "type": "optical", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "172.10.44.2", "address_prefix": "24", "vlan_tag": 101, + "site_location": "transport", "mtu": "1500" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "POP2"}}, "device_type": "packet-pop", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.204.221"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "201", "name": "201", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.1.101.1", "address_prefix": "24", + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[201]", "resource_value": { + "uuid": "201", "name": "201", "type": "optical", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", 
"resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "172.10.44.2", "address_prefix": "24", "vlan_tag": 201, + "site_location": "transport", "mtu": "1500" + }}} + ]}} + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "L13"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "POP2" }}, "endpoint_uuid": {"uuid": "500"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "L14"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "O-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "POP1" }}, "endpoint_uuid": {"uuid": "500"}} + ]} + ] +} diff --git a/src/tests/mwc26-f5ga/data/topology/topology-e2e.json b/src/tests/mwc26-f5ga/data/topology/topology-e2e.json new file mode 100644 index 0000000000000000000000000000000000000000..117e97e61881da30a29027860f3927d9f98a88ab --- /dev/null +++ b/src/tests/mwc26-f5ga/data/topology/topology-e2e.json @@ -0,0 +1,43 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "e2e"}}} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_SLICE"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.11"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", 
"resource_value": "80" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "device_type": "nce", + "device_drivers": ["DEVICEDRIVER_NCE"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8081" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}} + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "L3"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "L4"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "O-PE1"}}, "endpoint_uuid": {"uuid": "200"}} + ]} + ] +} diff --git a/src/tests/mwc26-f5ga/data/topology/topology-ip.json b/src/tests/mwc26-f5ga/data/topology/topology-ip.json new file mode 100644 index 0000000000000000000000000000000000000000..cd772016009c21a7a36b8607d158845f9d1a5db2 --- /dev/null +++ 
b/src/tests/mwc26-f5ga/data/topology/topology-ip.json @@ -0,0 +1,149 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "trans-pkt"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "P-PE1"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.122.25"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "128.32.44.254", "address_prefix": "24", "vlan_tag": 21, + "site_location": "access", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.1.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.2.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": 
"P-P1"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.31"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.1.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.3.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "P-P2"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.33"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + 
"address_ip": "10.44.2.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.4.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "P-PE2"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.32"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.10.44.254", "address_prefix": "24", "vlan_tag": 201, + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.3.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.4.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "L5"}}, "link_type" : 
"LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "P-P1" }}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "L6"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-P2" }}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "L9"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-P1" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "L10"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-P2" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "501"}} + ] + } + ] +} diff --git a/src/tests/mwc26-f5ga/deploy-specs-agg.sh b/src/tests/mwc26-f5ga/deploy-specs-agg.sh new file mode 100644 index 0000000000000000000000000000000000000000..c7b5e98b50ebf7e057b36a6d7b0433b0c0e85a7e --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy-specs-agg.sh @@ -0,0 +1,220 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. 
+export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. 
+export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/mwc26-f5ga/deploy-specs-e2e.sh b/src/tests/mwc26-f5ga/deploy-specs-e2e.sh new file mode 100644 index 0000000000000000000000000000000000000000..c7b5e98b50ebf7e057b36a6d7b0433b0c0e85a7e --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy-specs-e2e.sh @@ -0,0 +1,220 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. 
+export TFS_COMPONENTS="context device pathcomp service slice nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export 
TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. 
+export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. 
+export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/mwc26-f5ga/deploy-specs-ip.sh b/src/tests/mwc26-f5ga/deploy-specs-ip.sh new file mode 100644 index 0000000000000000000000000000000000000000..c02dac122fb3dd8cbda547be25f268920cc4e5e5 --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy-specs-ip.sh @@ -0,0 +1,220 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. 
+export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. 
+export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/mwc26-f5ga/deploy.sh b/src/tests/mwc26-f5ga/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..4cda867d5d3d7ec614c99203cffa91e670311c85 --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy.sh @@ -0,0 +1,111 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/../../../.." && pwd)" + +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl + +# Get the current hostname +HOSTNAME=$(hostname) +echo "Deploying in ${HOSTNAME}..." 
+ +case "$HOSTNAME" in + simap-datastore) + echo "Building SIMAP DataStore..." + cd "${REPO_ROOT}" + docker buildx build -t simap-datastore:mock -f ./src/tests/tools/simap_datastore/Dockerfile . + + echo "Building NCE-FAN Controller..." + docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . + + echo "Building NCE-T Controller..." + docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . + + echo "Building AI Engine..." + docker buildx build -t ai-engine:latest -f ./src/tests/tools/simap_ai_engine/ai_engine/Dockerfile . + + echo "Cleaning up..." + docker rm --force simap-datastore + docker rm --force nce-fan-ctrl + docker rm --force nce-t-ctrl + docker rm --force ai-engine + + echo "Deploying support services..." + docker run --detach --name simap-datastore --publish 8080:8080 \ + -e INFLUXDB_HOST=10.254.0.9 \ + -e INFLUXDB_PORT=8181 \ + simap-datastore:mock + + docker run --detach --name nce-fan-ctrl --publish 8081:8080 \ + --env SIMAP_ADDRESS=10.254.0.9 \ + --env SIMAP_PORT=8080 \ + nce-fan-ctrl:mock + docker run --detach --name nce-t-ctrl --publish 8082:8080 \ + --env SIMAP_ADDRESS=10.254.0.9 \ + --env SIMAP_PORT=8080 \ + nce-t-ctrl:mock + + echo "Deploying AI Engine..." + docker run --detach --name ai-engine --publish 8084:8080 \ + --env SIMAP_DATASTORE_ADDRESS=10.254.0.9 \ + --env SIMAP_DATASTORE_PORT=8181 \ + --env SIMAP_DATASTORE_USERNAME=admin \ + --env SIMAP_DATASTORE_PASSWORD=admin \ + ai-engine:latest + # NOTE: If testing, run client (src/tests/tools/simap_server/run_client.sh) to manually populate SIMAP Server with telemetry data. + + sleep 2 + docker ps -a + ;; + tfs-e2e-ctrl) + echo "Deploying TFS E2E Controller..." + sed -i 's|\(