diff --git a/.gitignore b/.gitignore
index 1b9e692a37af46fdb04e318d79eb08ac8e6e6eb5..b3806caaa7c5ad2a32d8089a86948b84570a5bb8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -146,6 +146,7 @@ venv.bak/
 
 # VSCode project settings
 .vscode/
+.github/
 
 # Visual Studio project settings
 /.vs
diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml
index 91fa65af3742c2a5c51ed8dc40f1241b1bd930cb..035f8eeaf144eae71f88fcd29503d47c3f87a64f 100644
--- a/manifests/simap_connectorservice.yaml
+++ b/manifests/simap_connectorservice.yaml
@@ -43,7 +43,7 @@ spec:
           # Assuming SIMAP Server is deployed in a local Docker container, as per:
           # - ./src/tests/tools/simap_datastore/build.sh
           # - ./src/tests/tools/simap_datastore/deploy.sh
-          value: "172.17.0.1"
+          value: "10.254.0.9"
         - name: SIMAP_DATASTORE_PORT
           # Assuming SIMAP Server is deployed in a local Docker container, as per:
          # - ./src/tests/tools/simap_datastore/build.sh
diff --git a/src/common/tools/rest_conf/server/restconf_server/Callbacks.py b/src/common/tools/rest_conf/server/restconf_server/Callbacks.py
index 04a8b8bd9cb4dca5908029918393abfa27780b38..bd66be2f7048be72347256c58a7cb33ec93444f8 100644
--- a/src/common/tools/rest_conf/server/restconf_server/Callbacks.py
+++ b/src/common/tools/rest_conf/server/restconf_server/Callbacks.py
@@ -49,7 +49,7 @@ class _Callback:
         @param old_data: Resource representation before retrieval, if applicable, otherwise `None`
         @returns boolean indicating whether additional callbacks should be executed, defaults to False
         '''
-        MSG = 'match={:s}, path={:s}, old_data={:s}'
+        MSG = 'match={}, path={}, old_data={}'
         msg = MSG.format(match.groupdict(), path, old_data)
         raise NotImplementedError(msg)
 
@@ -66,7 +66,7 @@ class _Callback:
         @param new_data: Resource representation after change, if applicable, otherwise `None`
         @returns boolean indicating whether additional callbacks should be executed, defaults to False
         '''
-        MSG = 'match={:s}, path={:s}, old_data={:s}, new_data={:s}'
+        MSG = 'match={}, path={}, old_data={}, new_data={}'
         msg = MSG.format(match.groupdict(), path, old_data, new_data)
         raise NotImplementedError(msg)
 
@@ -81,7 +81,7 @@ class _Callback:
         @param input_data: Input data, if applicable, otherwise `None`
         @returns Optional[Dict] containing output data, defaults to None
         '''
-        MSG = 'match={:s}, path={:s}, input_data={:s}'
+        MSG = 'match={}, path={}, input_data={}'
         msg = MSG.format(match.groupdict(), path, input_data)
         raise NotImplementedError(msg)
diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py
index 08f34b8ad12ae34ee38d94bb18ad762e958c18cb..1fe3b36f0e67b6a450102b62398e1b9847d20880 100644
--- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py
+++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py
@@ -186,6 +186,7 @@ class IetfL3VpnDriver(_Driver):
     def SetConfig(
         self, resources : List[Tuple[str, Any]]
     ) -> List[Union[bool, Exception]]:
+        LOGGER.info('SetConfig called with resources: {:s}'.format(str(resources)))
         results = []
         if len(resources) == 0: return results
         with self.__lock:
diff --git a/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py b/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py
index 3b537a4675db4d16c66756804eebe028a3f94791..68cbc448a63aadf013aecc00236fa92ec5dea2cd 100644
--- a/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py
+++ b/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py
@@ -259,10 +259,10 @@ def setup_config_rules(
             "/service[{:s}]/IETFL3VPN".format(service_uuid),
             l3_vpn_data_model,
         ),
-        #json_config_rule_set(
-        #    "/service[{:s}]/IETFL3VPN/operation".format(service_uuid),
-        #    {"type": operation_type},
-        #),
+        json_config_rule_set(
+            "/service[{:s}]/IETFL3VPN/operation".format(service_uuid),
+            {"type": operation_type},
+        ),
     ]
     return json_config_rules
 
@@ -274,10 +274,10 @@ def teardown_config_rules(service_uuid: str) -> List[Dict]:
             "/service[{:s}]/IETFL3VPN".format(service_uuid),
             {"id": service_uuid},
         ),
-        #json_config_rule_delete(
-        #    "/service[{:s}]/IETFL3VPN/operation".format(service_uuid),
-        #    {},
-        #),
+        json_config_rule_delete(
+            "/service[{:s}]/IETFL3VPN/operation".format(service_uuid),
+            {"type": "delete"},
+        ),
     ]
     return json_config_rules
diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py
index 8aafffc1aa79090c2ad7bda0b50020c924cae7cb..3917772d165f6fb896408c4cfd474d7f7aab0b17 100644
--- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py
+++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py
@@ -22,7 +22,7 @@ from common.tools.rest_conf.client.RestConfClient import RestConfClient
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 from device.client.DeviceClient import DeviceClient
 from simap_connector.service.telemetry.worker.SynthesizerWorker import SynthesizerWorker
-from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum
+from simap_connector.service.telemetry.worker._Worker import _Worker, WorkerTypeEnum
 from .database.Subscription import subscription_get, subscription_set, subscription_delete
 from .database.SubSubscription import (
     sub_subscription_list, sub_subscription_set, sub_subscription_delete
@@ -165,13 +165,20 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer):
         link_id = request.link_id
         bandwidth_factor = request.bandwidth_factor
         latency_factor = request.latency_factor
+        # connection_count = request.connection_count
+        # TODO: Remove bandwidth_factor and latency_factor from the request, as they are not used in the current implementation.
+        #       Add connection_count to the request.
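+        # Editorial sketch (assumption, not part of this patch): once connection_count is
+        # added to the gRPC request message per the TODO above, the hardcoded value below
+        # can simply be replaced with:
+        #   connection_count = request.connection_count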
+
+        connection_count = 0
 
         synthesizer_name = '{:s}:{:s}'.format(network_id, link_id)
-        synthesizer : Optional[SynthesizerWorker] = self._telemetry_pool.get_worker(
-            WorkerTypeEnum.SYNTHESIZER, synthesizer_name
+        synthesizer : Optional[_Worker] = self._telemetry_pool.get_worker(
+            WorkerTypeEnum.SYNTHESIZER, synthesizer_name
         )
         if synthesizer is None:
             MSG = 'Synthesizer({:s}) not found'
             raise Exception(MSG.format(synthesizer_name))
-        synthesizer.change_resources(bandwidth_factor, latency_factor)
+        assert isinstance(synthesizer, SynthesizerWorker), \
+            'Expected SynthesizerWorker, got {:s}'.format(type(synthesizer).__name__)
+        synthesizer.change_resources(connection_count)
 
         return Empty()
diff --git a/src/simap_connector/service/Tools.py b/src/simap_connector/service/Tools.py
index 024f8d70896d9555a0eb51f2730e6b208726ddb6..1e143700ea1f78c9babe886d80d082f2b7b90b0e 100644
--- a/src/simap_connector/service/Tools.py
+++ b/src/simap_connector/service/Tools.py
@@ -62,24 +62,34 @@ def discover_link_details(restconf_client : RestConfClient, xpath_filter : str)
     network_id, link_id = link_xpath_match.groups()
     link_details = LinkDetails(Link(network_id, link_id))
 
-    xpath_filter = link_details.link.get_xpath_filter(add_simap_telemetry=False)
-    xpath_data = restconf_client.get(xpath_filter)
+    # Workaround: RESTCONF server doesn't support namespace-prefixed child element paths;
+    # query at network level and filter the link from the response.
+    network_xpath = '/ietf-network:networks/network={:s}'.format(network_id)
+    xpath_data = restconf_client.get(network_xpath)
     if not xpath_data:
-        raise Exception('Resource({:s}) not found in SIMAP Server'.format(str(xpath_filter)))
-
-    links = xpath_data.get('ietf-network-topology:link', list())
-    if len(links) == 0:
-        raise Exception('Link({:s}) not found'.format(str(xpath_filter)))
-    if len(links) > 1:
-        raise Exception('Multiple occurrences for Link({:s})'.format(str(xpath_filter)))
-    link = links[0]
-    if link['link-id'] != link_id:
-        MSG = 'Retieved Link({:s}) does not match xpath_filter({:s})'
-        raise Exception(MSG.format(str(link), str(xpath_filter)))
+        raise Exception('Network({:s}) not found in SIMAP Server'.format(str(network_xpath)))
+
+    # Extract network data from the response
+    networks = xpath_data.get('ietf-network:network', [])
+    if len(networks) == 0:
+        raise Exception('Network({:s}) not found in response'.format(network_id))
+    network_data = networks[0]
+
+    # Find the target link
+    links = network_data.get('ietf-network-topology:link', list())
+    link = None
+    for l in links:
+        if l['link-id'] == link_id:
+            link = l
+            break
+
+    if link is None:
+        raise Exception('Link({:s}) not found in network({:s})'.format(link_id, network_id))
+
     supporting_links = link.get('supporting-link', list())
     if len(supporting_links) == 0:
-        MSG = 'No supporting links found for Resource({:s}, {:s})'
-        raise Exception(MSG.format(str(xpath_filter), str(xpath_data)))
+        MSG = 'No supporting links found for Link({:s}) in Network({:s})'
+        raise Exception(MSG.format(str(link_id), str(network_id)))
     for sup_link in supporting_links:
         link_details.supporting_links.append(Link(
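Editorial note on the discover_link_details() workaround above: the link is now filtered client-side from a network-level query. A minimal sketch of the reply shape the new code expects, assuming the standard ietf-network JSON encoding (identifiers here are hypothetical):

    # Hypothetical RESTCONF reply for GET /ietf-network:networks/network=trans-pkt
    xpath_data = {
        'ietf-network:network': [{
            'network-id': 'trans-pkt',
            'ietf-network-topology:link': [{
                'link-id': 'L6',
                'supporting-link': [{'network-ref': 'admin', 'link-ref': 'L6'}],
            }],
        }],
    }
    # The loop in discover_link_details() picks the entry whose 'link-id' matches link_id.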
diff --git a/src/simap_connector/service/__main__.py b/src/simap_connector/service/__main__.py
index 2f5e1146ba00931dfabd0603c1abfa78c817520c..b8782ff9c24fece1bb1189f8b70c3f47d4ec8907 100644
--- a/src/simap_connector/service/__main__.py
+++ b/src/simap_connector/service/__main__.py
@@ -24,12 +24,12 @@ from simap_connector.Config import (
     SIMAP_DATASTORE_SCHEME, SIMAP_DATASTORE_ADDRESS, SIMAP_DATASTORE_PORT,
     SIMAP_DATASTORE_USERNAME, SIMAP_DATASTORE_PASSWORD,
 )
-from .database.Engine import Engine
-from .database.models._Base import rebuild_database
-from .simap_updater.SimapClient import SimapClient
+from .database.Engine import Engine
+from .database.models._Base import rebuild_database
+from .simap_updater.SimapClient import SimapClient
 from .simap_updater.SimapUpdater import SimapUpdater
-from .telemetry.TelemetryPool import TelemetryPool
-from .SimapConnectorService import SimapConnectorService
+from .telemetry.TelemetryPool import TelemetryPool
+from .SimapConnectorService import SimapConnectorService
 
 TERMINATE = threading.Event()
diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py
index e01d78451897a14b9994ba5a748432d90f0b6b6b..9c34b87ae029a744c6f0f64679b5c409585d1307 100644
--- a/src/simap_connector/service/simap_updater/AllowedLinks.py
+++ b/src/simap_connector/service/simap_updater/AllowedLinks.py
@@ -13,8 +13,27 @@
 # limitations under the License.
 
 ALLOWED_LINKS_PER_CONTROLLER = {
-    'e2e'      : { 'L1', 'L2', 'L3', 'L4' },
-    'agg'      : { 'L7ab', 'L7ba', 'L8ab', 'L8ba', 'L11ab',
-                   'L11ba', 'L12ab', 'L12ba', 'L13', 'L14' },
-    'trans-pkt': { 'L5', 'L6', 'L9', 'L10' },
+    'e2e'      : { 'L1', 'L2' },
+    'agg'      : { 'L14' },
+    'trans-pkt': { 'L3', 'L5', 'L6', 'L9', 'L10', 'L13' },
+    # The remaining links cannot be monitored; therefore, they are not included in the
+    # allowed links for the controllers:
+    # 'agg' : { 'L7ab', 'L7ba', 'L8ab', 'L8ba', 'L11ab', 'L11ba', 'L12ab', 'L12ba', },
+}
+
+# NOTE: Values must stay within 0..100 because the schema does not allow
+# bandwidth-utilization (a percentage of link capacity) to exceed 100%,
+# as per the schema below:
+# /* --- Local typedefs --- */
+# typedef percent {
+#     type decimal64 {
+#         fraction-digits 2;
+#         range "0 .. 100";
+#     }
+#     units "percent";
+#     description "0–100 percent value.";
+# }
+LINKS_CAPACITY = {
+    'L1'   : 100, 'L2'   : 100, 'L3'   : 100, 'L4'   : 100,
+    'L5'   : 100, 'L6'   : 100, 'L9'   : 100, 'L10'  : 100,
+    'L7ab' : 100, 'L7ba' : 100, 'L8ab' : 100, 'L8ba' : 100, 'L11ab': 100,
+    'L11ba': 100, 'L12ab': 100, 'L12ba': 100, 'L13'  : 100, 'L14'  : 100,
+}
diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py
index d8b04f8d4dd8b07d9bf4d6c3ef01e5190c350aaa..98dc9a92375189d5eff5de10f38790f35f025c84 100644
--- a/src/simap_connector/service/simap_updater/ObjectCache.py
+++ b/src/simap_connector/service/simap_updater/ObjectCache.py
@@ -14,12 +14,13 @@
 
 import logging
-from enum import Enum
+from enum import Enum
 from typing import Any, Dict, List, Optional, Tuple
-from common.tools.context_queries.Device import get_device, get_devices
-from common.tools.context_queries.Link import get_link, get_links
-from common.tools.context_queries.Topology import get_topology, get_topologies
-from common.tools.context_queries.Service import get_service_by_uuid, get_services
+from common.tools.context_queries.Device import get_device, get_devices
+from common.tools.context_queries.Link import get_link, get_links
+from common.tools.context_queries.Topology import get_topology, get_topologies
+from common.tools.context_queries.Service import get_service_by_uuid, get_services
+from common.tools.context_queries.Connection import get_connection_by_uuid
 from context.client.ContextClient import ContextClient
 
@@ -41,7 +42,7 @@ KEY_LENGTHS = {
     CachedEntities.ENDPOINT   : 2,
     CachedEntities.LINK       : 1,
     CachedEntities.SERVICE    : 1,
-    CachedEntities.CONNECTION : 3,
+    CachedEntities.CONNECTION : 1,
 }
 
@@ -63,6 +64,7 @@ class ObjectCache:
     def __init__(self, context_client : ContextClient):
         self._context_client = context_client
         self._object_cache : Dict[Tuple[str, str], Any] = dict()
+        # self.populate_all_cache() # NOTE: Added for testing purposes; keep it commented out or remove it.
 
     def get(
         self, entity : CachedEntities, *object_uuids : str,
@@ -113,6 +115,10 @@ class ObjectCache:
             object_inst = get_service_by_uuid(
                 self._context_client, object_uuids[0], rw_copy=False
             )
+        elif entity == CachedEntities.CONNECTION:
+            object_inst = get_connection_by_uuid(
+                self._context_client, object_uuids[0], rw_copy=False
+            )
         else:
             MSG = 'Not Supported ({:s}, {:s})'
             LOGGER.warning(MSG.format(str(entity.value).title(), str(object_uuids)))
@@ -124,7 +130,9 @@ class ObjectCache:
             return None
 
         self.set(entity, object_inst, object_uuids[0])
-        self.set(entity, object_inst, object_inst.name)
+        # Connections don't have a name field, so skip setting by name
+        if entity != CachedEntities.CONNECTION:
+            self.set(entity, object_inst, object_inst.name)
 
         if entity == CachedEntities.DEVICE:
             device_uuid = object_inst.device_id.device_uuid.uuid
@@ -180,7 +188,9 @@ class ObjectCache:
 
         for (object_uuid, object_name), object_inst in objects.items():
             self.set(entity, object_inst, object_uuid)
-            self.set(entity, object_inst, object_name)
+            # Connections don't have a name field (object_name is the same as the UUID), so skip the redundant set
+            if entity != CachedEntities.CONNECTION:
+                self.set(entity, object_inst, object_name)
 
             if entity == CachedEntities.DEVICE:
                 for endpoint in object_inst.device_endpoints:
@@ -199,3 +209,19 @@ class ObjectCache:
     def delete(self, entity : CachedEntities, *object_uuids : str) -> None:
         object_key = compose_object_key(entity, *object_uuids)
         self._object_cache.pop(object_key, None)
+
+    def populate_all_cache(self) -> None:
+        """Populate the cache with all entities, for testing purposes."""
+        LOGGER.info('Populating cache with all entities for testing...')
+        for entity in CachedEntities:
+            if entity in (CachedEntities.ENDPOINT, CachedEntities.CONNECTION):
+                # Endpoints are populated when devices are updated.
+                # Connections are service-scoped; they are cached on demand during events.
+                continue
+            try:
+                self._update_all(entity)
+                # LOGGER.info('Populated cache for entity: {:s}'.format(entity.value))
+            except Exception as e:
+                LOGGER.warning('Failed to populate cache for entity {:s}: {:s}'.format(
+                    entity.value, str(e)))
+        LOGGER.info('Cache population completed')
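Editorial usage sketch for the CONNECTION entries introduced above: unlike devices or links, they hold plain dict mappings written by the SIMAP updater rather than protobuf objects; context_client and conn_uuid are assumed to exist:

    cache = ObjectCache(context_client)
    # Stored by SimapUpdater during connection events; keyed by the connection UUID only
    cache.set(
        CachedEntities.CONNECTION,
        {'domain': 'trans-pkt', 'links': {}, 'triggered_links': []},
        conn_uuid,
    )
    # auto_retrieve=False avoids a Context lookup when the entry is absent
    mapping = cache.get(CachedEntities.CONNECTION, conn_uuid, auto_retrieve=False)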
diff --git a/src/simap_connector/service/simap_updater/RealSimaps.py b/src/simap_connector/service/simap_updater/RealSimaps.py
new file mode 100644
index 0000000000000000000000000000000000000000..184df61e95fc608e36ab8ea28b0a1fcf260d8412
--- /dev/null
+++ b/src/simap_connector/service/simap_updater/RealSimaps.py
@@ -0,0 +1,283 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging
+from typing import Dict, List, Tuple
+from context.client.ContextClient import ContextClient
+from common.tools.context_queries.Device import get_device
+from .SimapClient import SimapClient
+
+
+LOGGER = logging.getLogger(__name__)
+
+def extract_network_data(context_client: ContextClient, network_id: str, network_connection: dict) -> list[tuple[str, dict]]:
+    """Extract the two SDP (device_name, termination-point) entries for a connection."""
+    try:
+        # Extract path_hops_endpoint_ids from the network_connection dict
+        path_hops = network_connection.get('path_hops_endpoint_ids', [])
+
+        if not path_hops:
+            LOGGER.warning(f"No path_hops_endpoint_ids found in network_connection for network {network_id}")
+            return []
+
+        if len(path_hops) < 2:
+            LOGGER.warning(f"Connection path too short (less than 2 hops) for network {network_id}")
+            return []
+
+        # Extract first and last hops (SDPs - Service Demarcation Points)
+        first_hop = path_hops[0]
+        last_hop = path_hops[-1]
+
+        # Extract device and endpoint UUIDs for SDPs
+        first_device_uuid = first_hop.get('device_id', {}).get('device_uuid', {}).get('uuid', '')
+        first_endpoint_uuid = first_hop.get('endpoint_uuid', {}).get('uuid', '')
+
+        last_device_uuid = last_hop.get('device_id', {}).get('device_uuid', {}).get('uuid', '')
+        last_endpoint_uuid = last_hop.get('endpoint_uuid', {}).get('uuid', '')
+
+        if not all([first_device_uuid, first_endpoint_uuid, last_device_uuid, last_endpoint_uuid]):
+            LOGGER.warning(f"Invalid first or last hop in path_hops_endpoint_ids for network {network_id}")
+            return []
+
+        # Prepare results for exactly 2 SDPs
+        network_data: List[Tuple[str, Dict[str, List[str]]]] = []
+
+        # Process first device (sdp1)
+        try:
+            first_device = get_device(
+                context_client, first_device_uuid, rw_copy=False,
+                include_endpoints=True, include_config_rules=False, include_components=False
+            )
+            if first_device is None:
+                LOGGER.warning(f"First device with UUID {first_device_uuid} not found in context")
+                return []
+
+            first_device_name = first_device.name
+
+            # Find the service-facing endpoint name
+            first_endpoint_name = None
+            for endpoint in first_device.device_endpoints:
+                if endpoint.endpoint_id.endpoint_uuid.uuid == first_endpoint_uuid:
+                    first_endpoint_name = endpoint.name
+                    break
+
+            if not first_endpoint_name:
+                LOGGER.warning(f"First endpoint {first_endpoint_uuid} not found in device {first_device_name}")
+                return []
+
+            network_data.append((first_device_name, {'termination_points': [first_endpoint_name]}))
+
+        except Exception as e:
+            LOGGER.error(f"Error retrieving first device {first_device_uuid} from context: {e}")
+            return []
+
+        # Process last device (sdp2)
+        try:
+            last_device = get_device(
+                context_client, last_device_uuid, rw_copy=False,
+                include_endpoints=True, include_config_rules=False, include_components=False
+            )
+            if last_device is None:
+                LOGGER.warning(f"Last device with UUID {last_device_uuid} not found in context")
+                return []
+
+            last_device_name = last_device.name
+
+            # Find the service-facing endpoint name
+            last_endpoint_name = None
+            for endpoint in last_device.device_endpoints:
+                if endpoint.endpoint_id.endpoint_uuid.uuid == last_endpoint_uuid:
+                    last_endpoint_name = endpoint.name
+                    break
+
+            if not last_endpoint_name:
+                LOGGER.warning(f"Last endpoint {last_endpoint_uuid} not found in device {last_device_name}")
+                return []
+
+            network_data.append((last_device_name, {'termination_points': [last_endpoint_name]}))
+
+        except Exception as e:
+            LOGGER.error(f"Error retrieving last device {last_device_uuid} from context: {e}")
+            return []
+
+        LOGGER.info(f"Extracted network data for {network_id}: {network_data}")
+        return network_data
+
+    except Exception as e:
+        LOGGER.error(f"Error extracting network data from connection for network {network_id}: {e}")
+        return []
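+# Editorial note (illustrative, not part of the patch): for a two-hop connection,
+# extract_network_data() returns one (device_name, config) pair per SDP, e.g.
+# (hypothetical names):
+#   [('R1', {'termination_points': ['eth0']}), ('R4', {'termination_points': ['eth2']})]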
+
+
+def set_simap_network(context_client: ContextClient, simap_client: SimapClient, network_id: str, network_connection: dict) -> None:
+    """
+    Configure a SIMAP network with preset configurations.
+
+    Args:
+        context_client: ContextClient instance
+        simap_client: SimapClient instance
+        network_id: Network identifier ('e2e', 'agg', or 'trans-pkt')
+        network_connection: Dictionary representation of a Connection protobuf with path_hops_endpoint_ids
+    """
+    LOGGER.info(f"Setting SIMAP network: {network_id} for connection with {len(network_connection.get('path_hops_endpoint_ids', []))} hops")
+    network_data : list[tuple[str, dict]] = extract_network_data(context_client, network_id, network_connection)
+
+    if network_id == 'e2e':
+        try:
+            # E2E Network Configuration
+            simap = simap_client.network('e2e')
+            simap.update(supporting_network_ids=['admin', 'agg'])
+
+            # Configure nodes
+            node_names = ['sdp1', 'sdp2']
+            endpoints = []
+
+            for i, (admin_node_id, node_config) in enumerate(network_data):
+                node = simap.node(node_names[i])
+                node.update(supporting_node_ids=[('admin', admin_node_id)])
+                for tp in node_config['termination_points']:
+                    node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)])
+                    endpoints.append(tp)
+
+            if len(endpoints) != 2:
+                MSG = 'Invalid number of endpoints for E2E network configuration. Expected 2, got {:d}.'
+                LOGGER.error(MSG.format(len(endpoints)))
+                return
+
+            link = simap.link('E2E-L1')
+            link.update(
+                'sdp1', endpoints[0], 'sdp2', endpoints[1],
+                supporting_link_ids=[
+                    ('admin', 'L1'), ('agg', 'AggNet-L1')
+                ]
+            )
+        except (KeyError, IndexError, ValueError) as e:
+            LOGGER.error(f'Error configuring E2E network: {e}')
+            return
+        except Exception as e:
+            LOGGER.error(f'Unexpected error configuring E2E network: {e}')
+            return
+
+    elif network_id == 'agg':
+        try:
+            # Aggregation Network Configuration
+            simap = simap_client.network('agg')
+            simap.update(supporting_network_ids=['admin', 'trans-pkt'])
+
+            # Configure nodes
+            node_names = ['sdp1', 'sdp2']
+            endpoints = []
+            for i, (admin_node_id, node_config) in enumerate(network_data):
+                node = simap.node(node_names[i])
+                node.update(supporting_node_ids=[('admin', admin_node_id)])
+                for tp in node_config['termination_points']:
+                    node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)])
+                    endpoints.append(tp)
+            if len(endpoints) != 2:
+                MSG = 'Invalid number of endpoints for Aggregation network configuration. Expected 2, got {:d}.'
+                LOGGER.error(MSG.format(len(endpoints)))
+                return
+
+            link = simap.link('AggNet-L1')
+            link.update(
+                'sdp1', endpoints[0], 'sdp2', endpoints[1],
+                supporting_link_ids=[
+                    ('trans-pkt', 'Trans-L1'), ('admin', 'L13'), ('admin', 'L3')
+                ]
+            )
+        except (KeyError, IndexError, ValueError) as e:
+            LOGGER.error(f'Error configuring Aggregation network: {e}')
+            return
+        except Exception as e:
+            LOGGER.error(f'Unexpected error configuring Aggregation network: {e}')
+            return
+
+    elif network_id == 'trans-pkt':
+        try:
+            # Transport Packet Network Configuration
+            simap = simap_client.network('trans-pkt')
+            simap.update(supporting_network_ids=['admin'])
+
+            # Configure nodes
+            node_names = ['site1', 'site2']
+            endpoints = []
+            for i, (admin_node_id, node_config) in enumerate(network_data):
+                node = simap.node(node_names[i])
+                node.update(supporting_node_ids=[('admin', admin_node_id)])
+                for tp in node_config['termination_points']:
+                    node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)])
+                    endpoints.append(tp)
+            if len(endpoints) != 2:
+                MSG = 'Invalid number of endpoints for Transport Packet network configuration. Expected 2, got {:d}.'
+                LOGGER.error(MSG.format(len(endpoints)))
+                return
+
+            link = simap.link('Trans-L1')
+            link.update(
+                'site1', endpoints[0], 'site2', endpoints[1],
+                supporting_link_ids=[
+                    ('admin', 'L6'), ('admin', 'L10')
+                ]
+            )
+        except (KeyError, IndexError, ValueError) as e:
+            LOGGER.error(f'Error configuring Transport Packet network: {e}')
+            return
+        except Exception as e:
+            LOGGER.error(f'Unexpected error configuring Transport Packet network: {e}')
+            return
+
+    else:
+        MSG = 'Unsupported network_id({:s}) to set SIMAP'
+        LOGGER.warning(MSG.format(str(network_id)))
+        return
+
+    LOGGER.info(f'Successfully configured SIMAP network: {network_id}')
+
+
+def delete_simap_network(simap_client: SimapClient, network_id: str) -> None:
+    """
+    Delete a SIMAP network configuration.
+
+    Args:
+        simap_client: SimapClient instance
+        network_id: Network identifier ('e2e', 'agg', or 'trans-pkt')
+    """
+    if network_id == 'e2e':
+        simap = simap_client.network('e2e')
+        simap.update(supporting_network_ids=['admin', 'agg'])
+
+        link = simap.link('E2E-L1')
+        link.delete()
+
+    elif network_id == 'agg':
+        simap = simap_client.network('agg')
+        simap.update(supporting_network_ids=['admin', 'trans-pkt'])
+
+        link = simap.link('AggNet-L1')
+        link.delete()
+
+    elif network_id == 'trans-pkt':
+        simap = simap_client.network('trans-pkt')
+        simap.update(supporting_network_ids=['admin'])
+
+        link = simap.link('Trans-L1')
+        link.delete()
+
+    else:
+        MSG = 'Unsupported network_id({:s}) to delete SIMAP'
+        LOGGER.warning(MSG.format(str(network_id)))
+        return
+
+    LOGGER.info(f'Successfully deleted SIMAP network: {network_id}')
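Editorial usage sketch for the two helpers added in RealSimaps.py, mirroring how SimapUpdater calls them (the clients and the trimmed Connection dict are assumed to exist):

    # network_connection is grpc_message_to_json(connection); 'trans-pkt' is one valid network_id
    set_simap_network(context_client, simap_client, 'trans-pkt', network_connection)
    # later, when the last connection on the domain's links is removed:
    delete_simap_network(simap_client, 'trans-pkt')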
diff --git a/src/simap_connector/service/simap_updater/SimapClient.py b/src/simap_connector/service/simap_updater/SimapClient.py
index 725b08bd47e0bd127cf0f7c4131cb744313b149d..8cdf4708ee401441f401a8d5f919f840e1c624c3 100644
--- a/src/simap_connector/service/simap_updater/SimapClient.py
+++ b/src/simap_connector/service/simap_updater/SimapClient.py
@@ -64,7 +64,7 @@ class TerminationPoint:
 
 
 class NodeTelemetry:
-    ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry'
+    ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}'
 
     def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str):
         self._restconf_client = restconf_client
@@ -173,7 +173,7 @@ class Node:
 
 
 class LinkTelemetry:
-    ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry'
+    ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}'
 
     def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str):
         self._restconf_client = restconf_client
@@ -197,8 +197,8 @@ class LinkTelemetry:
 
     def get(self) -> Dict:
         endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id)
-        telemetry : Dict = self._restconf_client.get(endpoint)
-        return telemetry
+        link : Dict = self._restconf_client.get(endpoint)
+        return link.get('ietf-network-topology:link', [{}])[0].get('simap-telemetry:simap-telemetry', {})
 
     def update(
         self, bandwidth_utilization : float, latency : float,
@@ -210,14 +210,17 @@
             'latency' : '{:.3f}'.format(latency),
         }
         if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids
-        link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry}
-        network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]}
+        link    = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry}
+        network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]}
         payload = {'ietf-network:networks': {'network': [network]}}
         self._restconf_client.patch(endpoint, payload)
 
     def delete(self) -> None:
         endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id)
-        self._restconf_client.delete(endpoint)
+        link    = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': {}}
+        network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]}
+        payload = {'ietf-network:networks': {'network': [network]}}
+        self._restconf_client.patch(endpoint, payload)
 
 
 class Link:
diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py
index 573085ac9182fac7f3d77740f2876146f8d394de..4abe81a52197b4b45925f6efdf0c3f4a8e27c7df 100644
--- a/src/simap_connector/service/simap_updater/SimapUpdater.py
+++ b/src/simap_connector/service/simap_updater/SimapUpdater.py
@@ -14,28 +14,28 @@
 
 import logging, queue, threading, uuid
-from typing import Any, Optional, Set
+from typing import Any, List, Optional, Set, Tuple
 from common.Constants import DEFAULT_TOPOLOGY_NAME
 from common.DeviceTypes import DeviceTypeEnum
 from common.proto.context_pb2 import (
-    ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent,
+    ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, ServiceStatusEnum,
     SliceEvent, TopologyEvent, ConnectionEvent
 )
-from common.tools.grpc.BaseEventCollector import BaseEventCollector
+from common.tools.grpc.BaseEventCollector import BaseEventCollector
 from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher
-from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.grpc.Tools import grpc_message_to_json_string, grpc_message_to_json
 from context.client.ContextClient import ContextClient
 from simap_connector.service.telemetry.worker.data.Resources import (
     ResourceLink, Resources, SyntheticSampler
 )
 from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum
-from simap_connector.service.telemetry.TelemetryPool import TelemetryPool
-from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER
-from .MockSimaps import delete_mock_simap, set_mock_simap
-from .ObjectCache import CachedEntities, ObjectCache
-from .SimapClient import SimapClient
-from .Tools import get_device_endpoint, get_link_endpoint #, get_service_endpoint
-
+from simap_connector.service.telemetry.TelemetryPool import SynthesizerWorker, TelemetryPool
+from .RealSimaps import set_simap_network, delete_simap_network
+from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER, LINKS_CAPACITY
+# from .MockSimaps import delete_mock_simap, set_mock_simap
+from .ObjectCache import CachedEntities, ObjectCache
+from .SimapClient import SimapClient
+from .Tools import get_device_endpoint, get_link_endpoint, get_connection_endpoints_and_links #, get_service_endpoint
 
 LOGGER = logging.getLogger(__name__)
@@ -51,16 +51,16 @@ SKIPPED_DEVICE_TYPES = {
 
 class EventDispatcher(BaseEventDispatcher):
     def __init__(
         self, events_queue : queue.PriorityQueue,
-        simap_client : SimapClient,
-        context_client : ContextClient,
-        telemetry_pool : TelemetryPool,
-        terminate : Optional[threading.Event] = None
+        simap_client    : SimapClient,
+        context_client  : ContextClient,
+        telemetry_pool  : TelemetryPool,
+        terminate       : Optional[threading.Event] = None
     ) -> None:
         super().__init__(events_queue, terminate)
-        self._simap_client = simap_client
-        self._context_client = context_client
-        self._telemetry_pool = telemetry_pool
-        self._object_cache = ObjectCache(self._context_client)
+        self._simap_client   = simap_client
+        self._context_client = context_client
+        self._telemetry_pool = telemetry_pool
+        self._object_cache   = ObjectCache(self._context_client)
         self._skipped_devices : Set[str] = set()
@@ -357,31 +357,19 @@ class EventDispatcher(BaseEventDispatcher):
         te_link = te_topo.link(link_name)
         te_link.update(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name)
 
-        worker_name = '{:s}:{:s}'.format(topology_name, link_name)
-        resources = Resources()
-        resources.links.append(ResourceLink(
-            domain_name=topology_name, link_name=link_name,
-            bandwidth_utilization_sampler=SyntheticSampler.create_random(
-                amplitude_scale = 25.0,
-                phase_scale     = 1e-7,
-                period_scale    = 86_400,
-                offset_scale    = 25,
-                noise_ratio     = 0.05,
-                min_value       = 0.0,
-                max_value       = 100.0,
-            ),
-            latency_sampler=SyntheticSampler.create_random(
-                amplitude_scale = 0.5,
-                phase_scale     = 1e-7,
-                period_scale    = 60.0,
-                offset_scale    = 10.0,
-                noise_ratio     = 0.05,
-                min_value       = 0.0,
-            ),
-            related_service_ids=[],
-        ))
-        sampling_interval = 1.0
-        self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval)
+        # worker_name = '{:s}:{:s}'.format(topology_name, link_name)
+        # resources = Resources()
+        # resources.links.append(ResourceLink(
+        #     domain_name = topology_name,
+        #     link_name   = link_name,
+        #     metrics_sampler = SyntheticSampler.create_random(
+        #         connection_count = 0,
+        #         link_capacity    = LINKS_CAPACITY.get(link_name, 100.0)
+        #     ),
+        #     related_service_ids=[],
+        # ))
+        # sampling_interval = 1.0
+        # self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval)
 
         return True
@@ -448,8 +436,8 @@ class EventDispatcher(BaseEventDispatcher):
         self._object_cache.delete(CachedEntities.LINK, link_uuid)
         self._object_cache.delete(CachedEntities.LINK, link_name)
 
-        worker_name = '{:s}:{:s}'.format(topology_name, link_name)
-        self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name)
+        # worker_name = '{:s}:{:s}'.format(topology_name, link_name)
+        # self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name)
 
         MSG = 'Link Removed: {:s}'
         LOGGER.info(MSG.format(grpc_message_to_json_string(link_event)))
@@ -491,17 +479,17 @@ class EventDispatcher(BaseEventDispatcher):
         #    LOGGER.warning(MSG.format(str_service_event, str_service))
         #    return False
 
-        topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False)
-        topology_names = {t.name for t in topologies}
-        topology_names.discard(DEFAULT_TOPOLOGY_NAME)
-        if len(topology_names) != 1:
-            MSG = 'ServiceEvent({:s}) skipped, unable to identify on which topology to insert it'
-            str_service_event = grpc_message_to_json_string(service_event)
-            LOGGER.warning(MSG.format(str_service_event))
-            return False
+        # topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False)
+        # topology_names = {t.name for t in topologies}
+        # topology_names.discard(DEFAULT_TOPOLOGY_NAME)
+        # if len(topology_names) != 1:
+        #     MSG = 'ServiceEvent({:s}) skipped, unable to identify on which topology to insert it'
+        #     str_service_event = grpc_message_to_json_string(service_event)
+        #     LOGGER.warning(MSG.format(str_service_event))
+        #     return False
 
-        domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net
-        set_mock_simap(self._simap_client, domain_name)
+        # domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net
+        # set_mock_simap(self._simap_client, domain_name)
 
         #domain_topo = self._simap_client.network(domain_name)
         #domain_topo.update()
@@ -611,7 +599,7 @@ class EventDispatcher(BaseEventDispatcher):
             return
 
         domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net
-        delete_mock_simap(self._simap_client, domain_name)
+        # delete_mock_simap(self._simap_client, domain_name)
 
         #domain_topo = self._simap_client.network(domain_name)
         #domain_topo.update()
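Editorial sketch (not part of the patch): the reworked connection dispatcher below caches one dict per connection so that REMOVE events can be resolved without re-querying Context; the values shown here are hypothetical:

    mapping = {
        'domain'         : 'trans-pkt',
        'links'          : {'<link-uuid>': {'name': 'L6', 'topology': 'trans-pkt'}},
        'triggered_links': [],  # filled when the temporary L6 -> L3/L13 rule fires
    }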
@@ -663,57 +651,322 @@ class EventDispatcher(BaseEventDispatcher):
         MSG = 'Processing Connection Event: {:s}'
         LOGGER.info(MSG.format(grpc_message_to_json_string(connection_event)))
 
-        # Here a connection object from context is received in connection_event.
-        # Here is gRPC message definition: message Connection { ConnectionId connection_id = 1; ServiceId service_id = 2; repeated EndPointId path_hops_endpoint_ids = 3; repeated ServiceId sub_service_ids = 4; ConnectionSettings settings = 5;}
-        # discard sub_service_ids and settings for now, as not used in SIMAP population.
-        # Extract service_id, endpoint_ids from connection_event to identify the connection.
-        # Get all links using gRPC ListLinkIds() from context, and find which link(s) correspond to the connection's endpoint_ids.
-        # Then update SIMAP accordingly.
-        # Then, do this only for connections that correspond to links that this controller is allowed to manage, as per ALLOWED_LINKS_PER_CONTROLLER.
-        # Then, do something like this (pseudocode):
-        # worker_name = '{:s}:{:s}'.format(topology_name, link_name)
-        # resources = Resources()
-        # resources.links.append(ResourceLink(
-        #     domain_name=topology_name, link_name=link_name,
-        #     bandwidth_utilization_sampler=SyntheticSampler.create_random(
-        #         amplitude_scale = 25.0,
-        #         phase_scale     = 1e-7,
-        #         period_scale    = 86_400,
-        #         offset_scale    = 25,
-        #         noise_ratio     = 0.05,
-        #         min_value       = 0.0,
-        #         max_value       = 100.0,
-        #     ),
-        #     latency_sampler=SyntheticSampler.create_random(
-        #         amplitude_scale = 0.5,
-        #         phase_scale     = 1e-7,
-        #         period_scale    = 60.0,
-        #         offset_scale    = 10.0,
-        #         noise_ratio     = 0.05,
-        #         min_value       = 0.0,
-        #     ),
-        #     related_service_ids=[],
-        # ))
-        # sampling_interval = 1.0
-        # self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval)
+        # Extract connection UUID from event
+        connection_uuid = connection_event.connection_id.connection_uuid.uuid
+
+        # Clean up any stale mapping for this connection (e.g., if the connection is being re-created)
+        old_mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False)
+        if old_mapping is not None and isinstance(old_mapping, dict) and 'domain' in old_mapping:
+            self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid)
+            LOGGER.debug('Removed stale mapping for connection {:s} before processing'.format(connection_uuid))
+
+        try:
+            # Use common helper to prepare connection data
+            result = self._prepare_connection_processing(connection_uuid)
+            if result is None:
+                return False
+            (domain_name, processed_links) = result
+
+            # Update telemetry for each link involved in this connection
+            for link_uuid, link_name, link_topology_name in processed_links:
+                # Count active connections on this link
+                active_conn_count = self._count_active_connections(link_uuid, domain_name)
+                LOGGER.info('Connection {:s} uses allowed link: {:s} (uuid: {:s})'.format(connection_uuid, link_name, link_uuid))
+                worker_name = '{:s}:{:s}'.format(link_topology_name, link_name)
+
+                # --- TEMPORARY: Check for special triggering rules for L6 in trans-pkt domain ---
+                if link_name == "L6":
+                    # Check for special triggering rules (e.g., L6 triggers L3 and L13)
+                    triggered_links = self._check_and_trigger_additional_links(
+                        link_topology_name, active_conn_count)
+                    # Update the cached mapping to include triggered links
+                    if triggered_links:
+                        mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False)
+                        if mapping and isinstance(mapping, dict):
+                            mapping['triggered_links'] = triggered_links
+                            self._object_cache.set(CachedEntities.CONNECTION, mapping, connection_uuid)
+                            LOGGER.debug('Updated connection {:s} mapping with {:d} triggered links'.format(
+                                connection_uuid, len(triggered_links)))
+                # --- END OF TEMPORARY LOGIC ---
+
+                # Worker should already exist from _dispatch_link_set (link creation event)
+                if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name):
+                    LOGGER.warning('Worker not found for link {:s}, creating and starting new worker'.format(link_name))
+
+                    # Create worker with same parameters as in _dispatch_link_set
+                    resources = Resources()
+                    resources.links.append(ResourceLink(
+                        domain_name = link_topology_name,
+                        link_name   = link_name,
+                        metrics_sampler = SyntheticSampler.create_random(
+                            connection_count = active_conn_count,
+                            link_capacity    = LINKS_CAPACITY.get(link_name, 100.0)
+                        ),
+                        related_service_ids=[],  # TODO: populate with actual related services if needed (later)
+                    ))
+                    sampling_interval = 1.0
+                    self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval)
+                    LOGGER.info('Started new synthesizer worker: {:s}'.format(worker_name))
+                else:
+                    # Worker exists, update connection count for congestion simulation
+                    worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name)
+                    assert isinstance(worker, SynthesizerWorker), \
+                        'Expected SynthesizerWorker, got {:s}'.format(type(worker).__name__)
+
+                    worker.change_resources(active_conn_count)
+                    LOGGER.info('Updated telemetry of already running worker: link {:s}, connection_count={:d}'.format(
+                        link_name, active_conn_count))
+
+        except Exception as e:
+            LOGGER.exception('Failed to process connection event {:s}: {:s}'.format(connection_uuid, str(e)))
+            return False
 
         return True
 
+    # TEMPORARY: This function implements the special triggering rules for L6 in the trans-pkt domain.
+    def _check_and_trigger_additional_links(
+        self, link_topology_name: str, active_conn_count: int
+    ) -> List[Tuple[str, str, str]]:
+        """
+        Check for special triggering rules and start additional workers.
+
+        Rule: When L6 is processed in the trans-pkt domain, also start workers for L3 and L13.
+
+        Args:
+            link_topology_name: Topology name of the link being processed (e.g., 'trans-pkt')
+            active_conn_count: Number of active connections on the triggering link
+
+        Returns:
+            List of triggered links with format: (link_uuid, link_name, topology_name)
+        """
+        triggered_links = []
+
+        # Trigger workers for L3 and L13 using the same topology as L6
+        for link_name in ['L3', 'L13']:
+            # Generate a UUID for the triggered link
+            link_uuid = str(uuid.uuid4())
+            worker_name = '{:s}:{:s}'.format(link_topology_name, link_name)
+
+            LOGGER.info('Triggering worker for link {:s} (generated uuid: {:s})'.format(link_name, link_uuid))
+
+            # Check if the worker already exists
+            if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name):
+                # Create and start the worker
+                resources = Resources()
+                resources.links.append(ResourceLink(
+                    domain_name = link_topology_name,
+                    link_name   = link_name,
+                    metrics_sampler = SyntheticSampler.create_random(
+                        connection_count = active_conn_count,
+                        link_capacity    = LINKS_CAPACITY.get(link_name, 100.0)
+                    ),
+                    related_service_ids = [],
+                ))
+                sampling_interval = 1.0
+                self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval)
+                LOGGER.info('Started triggered synthesizer worker: {:s}'.format(worker_name))
+            else:
+                LOGGER.info('Worker {:s} already exists, skipping creation'.format(worker_name))
+
+            triggered_links.append((link_uuid, link_name, link_topology_name))
+
+        return triggered_links
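+    # Editorial note (illustrative, not part of the patch): with the rule above,
+    #   self._check_and_trigger_additional_links('trans-pkt', 1)
+    # starts (or reuses) workers 'trans-pkt:L3' and 'trans-pkt:L13' and returns, e.g.:
+    #   [('<generated-uuid>', 'L3', 'trans-pkt'), ('<generated-uuid>', 'L13', 'trans-pkt')]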
+
+
+    def _prepare_connection_processing(self, connection_uuid: str):
+        """
+        Extract common logic for processing connection events.
+
+        Returns:
+            Tuple of (domain_name, processed_links), or None if processing failed
+        """
+        # Get the connection object
+        connection = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid)
+        if connection is None:
+            LOGGER.warning('Connection {:s} not found in cache'.format(connection_uuid))
+            return None
+
+        MSG = 'Processing Connection: {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(connection)))
+
+        _, link_uuids = get_connection_endpoints_and_links(connection_uuid)
+
+        # Determine the controller's domain name (network_id)
+        topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False)
+        topology_names = {t.name for t in topologies}
+        topology_names.discard(DEFAULT_TOPOLOGY_NAME)
+        if len(topology_names) != 1:
+            LOGGER.warning('Unable to identify the controller domain for connection {:s}; candidate topologies: {!r}'.format(connection_uuid, topology_names))
+            return None
+        domain_name = topology_names.pop()
+
+        # Call set_simap_network with the proper parameters
+        network_connection = grpc_message_to_json(connection)
+        set_simap_network(self._context_client, self._simap_client, domain_name, network_connection)
+        LOGGER.info('Set SIMAP network for connection {:s} in domain {:s}'.format(connection_uuid, domain_name))
+
+        # Filter links based on ALLOWED_LINKS_PER_CONTROLLER
+        allowed_link_names = ALLOWED_LINKS_PER_CONTROLLER.get(domain_name, set())
+        LOGGER.debug('Allowed links for domain {:s}: {:s}'.format(domain_name, str(allowed_link_names)))
+        processed_links = []
+        for link_uuid in link_uuids:
+            link = self._object_cache.get(CachedEntities.LINK, link_uuid)
+            if link.name in allowed_link_names:
+                # Get the link's topology for worker naming
+                link_topology_uuid, _ = get_link_endpoint(link)
+                link_topology = self._object_cache.get(CachedEntities.TOPOLOGY, link_topology_uuid)
+                processed_links.append((link_uuid, link.name, link_topology.name))
+
+        if not processed_links:
+            LOGGER.debug('Connection {:s} has no allowed links for domain {:s}'.format(
+                connection_uuid, domain_name))
+            self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid)
+            return None
+
+        # Cache the connection-to-links mapping for later retrieval (e.g., during REMOVE events)
+        mapping = {
+            'domain': domain_name,
+            'links': {
+                link_uuid: {'name': link_name, 'topology': link_topo_name}
+                for link_uuid, link_name, link_topo_name in processed_links
+            },
+            'triggered_links': []  # Will store additional links triggered by special rules
+        }
+        self._object_cache.set(CachedEntities.CONNECTION, mapping, connection_uuid)
+        LOGGER.debug('Cached connection {:s} mapping with {:d} links for domain {:s}'.format(
+            connection_uuid, len(processed_links), domain_name))
+
+        return domain_name, processed_links
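+    # Editorial note (illustrative, not part of the patch): counting is done purely over
+    # the cached mappings; if two cached connections both list link UUID 'abc' under the
+    # 'trans-pkt' domain, _count_active_connections('abc', 'trans-pkt') returns 2.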
+ + Args: + link_uuid: UUID of the link to count connections for + domain_name: Domain name to filter connections + Returns: + int: Number of active connections using this link + """ + + all_cached_connections = self._object_cache.get_all(CachedEntities.CONNECTION, fresh=False) + active_count = 0 + for cached_obj in all_cached_connections: + if not isinstance(cached_obj, dict) or 'domain' not in cached_obj or 'links' not in cached_obj: + continue + + if cached_obj['domain'] != domain_name: + continue + + if link_uuid in cached_obj['links']: + active_count += 1 + + LOGGER.info('Active connection count on link {:s} in domain {:s}: {:d}'.format( + link_uuid, domain_name, active_count)) + return active_count + + def dispatch_connection_create(self, connection_event : ConnectionEvent) -> None: if not self.dispatch_connection_set(connection_event): return MSG = 'Skipping Connection Create Event: {:s}' LOGGER.debug(MSG.format(grpc_message_to_json_string(connection_event))) - + + def dispatch_connection_update(self, connection_event : ConnectionEvent) -> None: if not self.dispatch_connection_set(connection_event): return MSG = 'Skipping Connection Update Event: {:s}' LOGGER.debug(MSG.format(grpc_message_to_json_string(connection_event))) - + + def dispatch_connection_remove(self, connection_event : ConnectionEvent) -> None: - MSG = 'Skipping Connection Remove Event: {:s}' - LOGGER.debug(MSG.format(grpc_message_to_json_string(connection_event))) + MSG = 'Processing Connection Remove Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(connection_event))) + + connection_uuid = connection_event.connection_id.connection_uuid.uuid + + try: + mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) + LOGGER.info('Retrieved mapping for connection {:s}: {:s}'.format(connection_uuid, str(mapping))) + if mapping is None: + MSG = 'Connection {:s} not managed by this controller (not in allowed links), skipping removal' + LOGGER.debug(MSG.format(connection_uuid)) + return + + # Defensive: distinguish mapping dicts from potential protobuf Connection objects + elif not isinstance(mapping, dict) or 'domain' not in mapping or 'links' not in mapping: + MSG = 'Invalid mapping structure for connection {:s}: expected dict with domain/links keys' + raise Exception(MSG.format(connection_uuid)) + + # Extract domain and links from cached mapping + domain_name = mapping['domain'] + link_uuids_dict = mapping['links'] + processed_links = [(link_uuid, link_data['name'], link_data['topology']) for link_uuid, link_data in link_uuids_dict.items()] + + LOGGER.info('Retrieved cached mapping for connection {:s}: domain={:s}, links={:d}'.format( + connection_uuid, domain_name, len(processed_links))) + + # Delete the connection from cache first (we already have the mapping) + self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid) + LOGGER.debug('Deleted cached mapping for connection {:s}'.format(connection_uuid)) + + # Process each link: count remaining connections and stop/update worker accordingly + all_links_stopped = True # Track if all links have been stopped + for link_uuid, link_name, link_topology_name in processed_links: + worker_name = '{:s}:{:s}'.format(link_topology_name, link_name) + + if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): + LOGGER.warning('Worker not found for link {:s}, skipping telemetry update for connection removal'.format(link_name)) + continue + + # Count remaining connections on this link (now excluding 
the deleted one) + remaining_conn_count = self._count_active_connections(link_uuid, domain_name) + + if remaining_conn_count == 0: + # No other connections use this link, stop the worker + self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + LOGGER.info('Stopped telemetry worker for link {:s}, no connections remain'.format(link_name)) + + # ---- TEMPORARY: Stop triggered links (L3 and L13 when L6 is removed from trans-pkt) ---- + if link_name == "L6": + try: + triggered_links = mapping.get('triggered_links', []) + if triggered_links: + LOGGER.info('Connection {:s} has {:d} triggered links to clean up'.format( + connection_uuid, len(triggered_links))) + + for _, trig_link_name, trig_link_topology_name in triggered_links: + trig_worker_name = '{:s}:{:s}'.format(trig_link_topology_name, trig_link_name) + + if self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, trig_worker_name): + self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, trig_worker_name) + LOGGER.info('Stopped triggered telemetry worker for link {:s}'.format(trig_link_name)) + else: + LOGGER.warning('Triggered worker {:s} not found during cleanup'.format(trig_worker_name)) + except Exception as e: + LOGGER.exception('Failed to stop triggered links for connection {:s}: {:s}'.format(connection_uuid, str(e))) + # ---- END OF TEMPORARY LOGIC ---- + else: + # Other connections still use this link, update worker with new count + all_links_stopped = False + worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + assert isinstance(worker, SynthesizerWorker), \ + 'Expected SynthesizerWorker, got {:s}'.format(type(worker).__name__) + + worker.change_resources(remaining_conn_count) + LOGGER.info('Updated telemetry for link {:s} after connection removal, {:d} connections remain'.format( + link_name, remaining_conn_count)) + + # Delete SIMAP network only if all links have been stopped + if all_links_stopped: + delete_simap_network(self._simap_client, domain_name) + LOGGER.info('Deleted SIMAP network for domain {:s} after all links stopped'.format(domain_name)) + else: + LOGGER.debug('SIMAP network {:s} retained, some links still have active connections'.format(domain_name)) + + except Exception as e: + LOGGER.exception('Failed to process connection removal {:s}: {:s}'.format( + connection_uuid, str(e))) class SimapUpdater: diff --git a/src/simap_connector/service/simap_updater/Tools.py b/src/simap_connector/service/simap_updater/Tools.py index d420f24e9b3714939b2900155c0a1bba2d350a90..1c5c3e092e2792876414b48634abd9642218a481 100644 --- a/src/simap_connector/service/simap_updater/Tools.py +++ b/src/simap_connector/service/simap_updater/Tools.py @@ -16,11 +16,12 @@ import enum from typing import List, Optional, Set, Tuple, Union from common.proto.context_pb2 import ( - EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, - DeviceEvent, Link, LinkEvent, Service, ServiceEvent, SliceEvent, TopologyEvent + EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, DeviceEvent, + Link, LinkEvent, Service, ServiceEvent, SliceEvent, TopologyEvent, Empty ) from common.tools.grpc.Tools import grpc_message_to_json_string - +from context.client.ContextClient import ContextClient +from common.tools.context_queries.Connection import get_connection_by_uuid class EventTypeEnum(enum.IntEnum): CREATE = EVENTTYPE_CREATE @@ -160,3 +161,64 @@ def get_service_endpoint(service : Service) -> Tuple[Optional[str], List[Tuple[s raise Exception(MSG.format(str(e), 
grpc_message_to_json_string(service))) from e return topology_uuid, endpoint_uuids + + +def get_connection_endpoints_and_links(connection_id: str) -> Tuple[List[Tuple[str, str]], List[str]]: + """ + Retrieve connection details and identify associated links. + Args: + connection_id: UUID string of the connection + Returns: + Tuple of: + - List of endpoint tuples (device_uuid, endpoint_uuid) in path order + - List of link UUIDs corresponding to consecutive endpoint pairs + """ + + context_client = ContextClient() + connection = get_connection_by_uuid(context_client, connection_id, rw_copy=False) + + if connection is None: + raise Exception(f"Failed to retrieve Connection({connection_id}): Connection not found") + + # Extract path_hops_endpoint_ids + endpoint_ids = [] + for endpoint_id in connection.path_hops_endpoint_ids: + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + endpoint_ids.append((device_uuid, endpoint_uuid)) + + if len(endpoint_ids) < 2: + # No path or single endpoint - no links + return endpoint_ids, [] + + # Find links connecting consecutive endpoint pairs + # Get all links from context + + link_list = context_client.ListLinks(Empty()) + link_uuids = [] + + # For each consecutive pair of endpoints in the path + for i in range(len(endpoint_ids) - 1): + src_device_uuid, src_endpoint_uuid = endpoint_ids[i] + dst_device_uuid, dst_endpoint_uuid = endpoint_ids[i + 1] + + # Find link connecting these endpoints + for link in link_list.links: + if len(link.link_endpoint_ids) != 2: + continue + + # Extract link endpoints + link_ep0_device = link.link_endpoint_ids[0].device_id.device_uuid.uuid + link_ep0_endpoint = link.link_endpoint_ids[0].endpoint_uuid.uuid + link_ep1_device = link.link_endpoint_ids[1].device_id.device_uuid.uuid + link_ep1_endpoint = link.link_endpoint_ids[1].endpoint_uuid.uuid + + # Check if link matches (bidirectional check) + if ((link_ep0_device == src_device_uuid and link_ep0_endpoint == src_endpoint_uuid and + link_ep1_device == dst_device_uuid and link_ep1_endpoint == dst_endpoint_uuid) or + (link_ep1_device == src_device_uuid and link_ep1_endpoint == src_endpoint_uuid and + link_ep0_device == dst_device_uuid and link_ep0_endpoint == dst_endpoint_uuid)): + link_uuids.append(link.link_id.link_uuid.uuid) + break + + return endpoint_ids, link_uuids diff --git a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py index 075c3b6d6e5cda25f342b2814bd66b0e23fd812f..e4109790c5d56a610628c0008683e88667545fad 100644 --- a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py +++ b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py @@ -72,13 +72,17 @@ class AggregatorWorker(_Worker): def run(self) -> None: self._logger.info('[run] Starting...') + MSG = '[run] Aggregating link ({:s}, {:s}) every {:.1f}s' + self._logger.info(MSG.format( + self._network_id, self._link_id, self._sampling_interval + )) kafka_producer = KafkaProducer(bootstrap_servers=KAFKA_BOOT_SERVERS) update_counter = 1 try: while not self._stop_event.is_set() and not self._terminate.is_set(): - #self._logger.debug('[run] Aggregating...') + self._logger.debug('[run] Aggregation cycle #{:d}...'.format(update_counter)) link_sample = self._aggregation_cache.aggregate() @@ -110,6 +114,12 @@ class AggregatorWorker(_Worker): link_sample.bandwidth_utilization, link_sample.latency, related_service_ids=list(link_sample.related_service_ids) ) + + MSG = '[run] 
Updated SIMAP link ({:s}, {:s}): BW={:.2f}%, Latency={:.3f}ms' + self._logger.debug(MSG.format( + self._network_id, self._link_id, + link_sample.bandwidth_utilization, link_sample.latency + )) update_counter += 1 diff --git a/src/simap_connector/service/telemetry/worker/CollectorWorker.py b/src/simap_connector/service/telemetry/worker/CollectorWorker.py index 27b665d05d487fd165f78a00722af72222bc9ef2..0827a3f8ce91e2c35877be79816b4b166cddf07e 100644 --- a/src/simap_connector/service/telemetry/worker/CollectorWorker.py +++ b/src/simap_connector/service/telemetry/worker/CollectorWorker.py @@ -14,6 +14,7 @@ import json, math, requests, threading, time +from requests.auth import HTTPBasicAuth from requests.exceptions import ReadTimeout from typing import Optional from .data.AggregationCache import AggregationCache, LinkSample @@ -31,6 +32,7 @@ CONTROLLER_TO_ADDRESS_PORT = { WAIT_LOOP_GRANULARITY = 0.5 +AUTH = HTTPBasicAuth('admin', 'admin') class CollectorWorker(_Worker): def __init__( @@ -73,7 +75,7 @@ class CollectorWorker(_Worker): # NOTE: Trick: we set 1-second read_timeout to force the loop to give control # back and be able to check termination events. # , timeout=(10, 1) - with session.get(stream_url, stream=True) as reply: + with session.get(stream_url, stream=True, auth=AUTH) as reply: reply.raise_for_status() it_lines = reply.iter_lines(decode_unicode=True, chunk_size=1024) @@ -140,7 +142,7 @@ class CollectorWorker(_Worker): MSG = '[direct_simap_polling] Requesting "{:s}"...' self._logger.info(MSG.format(str(simap_url))) - with requests.get(simap_url, timeout=10) as reply: + with requests.get(simap_url, timeout=10, auth=AUTH) as reply: reply.raise_for_status() data = reply.json() diff --git a/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py index 884a2cff8c794eab325a4d527460e087820420c4..e8007052714ae23dbd6e75b2a00dd778d772be8c 100644 --- a/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py +++ b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py @@ -34,11 +34,10 @@ class SynthesizerWorker(_Worker): self._resources = resources self._sampling_interval = sampling_interval - def change_resources(self, bandwidth_factor : float, latency_factor : float) -> None: + def change_resources(self, connection_count: int) -> None: with self._lock: for link in self._resources.links: - link.bandwidth_utilization_sampler.offset *= bandwidth_factor - link.latency_sampler.offset *= latency_factor + link.metrics_sampler.connection_count = connection_count def run(self) -> None: self._logger.info('[run] Starting...') diff --git a/src/simap_connector/service/telemetry/worker/_Worker.py b/src/simap_connector/service/telemetry/worker/_Worker.py index ae0da4fc78e076b9887f8b9164d5d1066dd7f7b5..e6fe4f1fb1f6167c4c1d0db7a45cac2b7de2ed85 100644 --- a/src/simap_connector/service/telemetry/worker/_Worker.py +++ b/src/simap_connector/service/telemetry/worker/_Worker.py @@ -35,12 +35,12 @@ class _Worker(threading.Thread): ) -> None: self._worker_type = worker_type self._worker_name = worker_name - self._worker_key = get_worker_key(worker_type, worker_name) + self._worker_key = get_worker_key(worker_type, worker_name) name = 'TelemetryWorker({:s})'.format(self._worker_key) super().__init__(name=name, daemon=True) - self._logger = logging.getLogger(name) - self._stop_event = threading.Event() - self._terminate = threading.Event() if terminate is None else terminate + self._logger = logging.getLogger(name) + 
self._stop_event = threading.Event() + self._terminate = threading.Event() if terminate is None else terminate @property def worker_type(self) -> WorkerTypeEnum: return self._worker_type diff --git a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py index 31a71d09634da3480d202cbaa4f3e20866deb154..7c71a8926bed4018cd52ac055c2a581fbaab30ae 100644 --- a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py +++ b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py @@ -13,10 +13,13 @@ # limitations under the License. -import threading +import logging, threading from dataclasses import dataclass, field -from datetime import datetime -from typing import Dict, Set, Tuple +from datetime import datetime, timezone +from typing import Dict, Optional, Set, Tuple + + +LOGGER = logging.getLogger(__name__) @dataclass @@ -40,18 +43,59 @@ class AggregationCache: def __init__(self) -> None: self._lock = threading.Lock() self._samples : Dict[Tuple[str, str], LinkSample] = dict() + self._last_valid_aggregation : Optional[AggregatedLinkSample] = None def update(self, link_sample : LinkSample) -> None: link_key = (link_sample.network_id, link_sample.link_id) with self._lock: self._samples[link_key] = link_sample + + MSG = '[update] Received sample for link ({:s}, {:s}): BW={:.2f}%, Latency={:.3f}ms, Services={:s}' + LOGGER.debug(MSG.format( + link_sample.network_id, link_sample.link_id, + link_sample.bandwidth_utilization, link_sample.latency, + str(link_sample.related_service_ids) + )) def aggregate(self) -> AggregatedLinkSample: with self._lock: - agg = AggregatedLinkSample(timestamp=datetime.utcnow()) - for sample in self._samples.values(): + num_samples = len(self._samples) + if num_samples > 0: + MSG = '[aggregate] Aggregating {:d} supporting link(s)' + LOGGER.info(MSG.format(num_samples)) + + if num_samples == 0: + if self._last_valid_aggregation is not None: + MSG = '[aggregate] No samples available, reusing last valid aggregation: BW={:.2f}%, Latency={:.3f}ms' + LOGGER.warning(MSG.format( + self._last_valid_aggregation.bandwidth_utilization, + self._last_valid_aggregation.latency + )) + # Return a copy with updated timestamp + return AggregatedLinkSample( + timestamp=datetime.now(timezone.utc), + bandwidth_utilization=self._last_valid_aggregation.bandwidth_utilization, + latency=self._last_valid_aggregation.latency, + related_service_ids=self._last_valid_aggregation.related_service_ids.copy() + ) + else: + MSG = '[aggregate] No samples available and no cached data, returning zeros' + LOGGER.warning(MSG) + return AggregatedLinkSample(timestamp=datetime.now(timezone.utc)) + + agg = AggregatedLinkSample(timestamp=datetime.now(timezone.utc)) + for link_key, sample in self._samples.items(): + network_id, link_id = link_key + + MSG = '[aggregate] - Link ({:s}, {:s}): BW={:.2f}%, Latency={:.3f}ms, Services={:s}' + LOGGER.debug(MSG.format( + network_id, link_id, + sample.bandwidth_utilization, sample.latency, + str(sample.related_service_ids) + )) + agg.bandwidth_utilization = max( agg.bandwidth_utilization, sample.bandwidth_utilization ) @@ -59,4 +103,14 @@ class AggregationCache: agg.related_service_ids = agg.related_service_ids.union( sample.related_service_ids ) + + if num_samples > 0: + MSG = '[aggregate] Result: BW={:.2f}% (max), Latency={:.3f}ms (sum), Services={:s}' + LOGGER.info(MSG.format( + agg.bandwidth_utilization, agg.latency, + str(agg.related_service_ids) + )) + # Cache 
this valid aggregation for future use + self._last_valid_aggregation = agg + return agg diff --git a/src/simap_connector/service/telemetry/worker/data/Resources.py b/src/simap_connector/service/telemetry/worker/data/Resources.py index 49c16c3404d5de650fcd13239eafcf87b4a98abc..2f3de063526548230107e692a98258dc31981cdd 100644 --- a/src/simap_connector/service/telemetry/worker/data/Resources.py +++ b/src/simap_connector/service/telemetry/worker/data/Resources.py @@ -27,8 +27,8 @@ class ResourceNode: related_service_ids : List[str] = field(default_factory=list) def generate_samples(self, simap_client : SimapClient) -> None: - cpu_utilization = self.cpu_utilization_sampler.get_sample() - simap_node = simap_client.network(self.domain_name).node(self.node_name) + cpu_utilization, _ = self.cpu_utilization_sampler.get_sample() + simap_node = simap_client.network(self.domain_name).node(self.node_name) simap_node.telemetry.update( cpu_utilization.value, related_service_ids=self.related_service_ids ) @@ -36,15 +36,13 @@ class ResourceNode: @dataclass class ResourceLink: - domain_name : str - link_name : str - bandwidth_utilization_sampler : SyntheticSampler - latency_sampler : SyntheticSampler - related_service_ids : List[str] = field(default_factory=list) + domain_name : str + link_name : str + metrics_sampler : SyntheticSampler # Single sampler for both BW and latency + related_service_ids : List[str] = field(default_factory=list) def generate_samples(self, simap_client : SimapClient) -> None: - bandwidth_utilization = self.bandwidth_utilization_sampler.get_sample() - latency = self.latency_sampler.get_sample() + bandwidth_utilization, latency = self.metrics_sampler.get_sample() simap_link = simap_client.network(self.domain_name).link(self.link_name) simap_link.telemetry.update( bandwidth_utilization.value, latency.value, diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index b942971282afa6818bab7008bd531fd6e9739cbf..1bcbbb9bd7f445b01f5c9892970c512ff9a52f65 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -13,51 +13,96 @@ # limitations under the License. -import math, random, sys, threading +import random, threading from dataclasses import dataclass, field from datetime import datetime -from typing import Dict, Optional +from typing import Dict, Optional, Tuple from .Sample import Sample @dataclass class SyntheticSampler: - amplitude : float = field(default=0.0) - phase : float = field(default=0.0) - period : float = field(default=1.0) - offset : float = field(default=0.0) - noise_ratio : float = field(default=0.0) - min_value : float = field(default=-sys.float_info.max) - max_value : float = field(default=sys.float_info.max) + """Simple sampler with temporal continuity: each new value stays close to the previous one. + + Bandwidth utilization ranges based on connection count (seed value, clamped range; see BW_RANGES): + 0 conns: seed=3%, clamped to 5-10% + 1 conn: seed=25%, clamped to 15-30% + 2 conns: seed=40%, clamped to 35-50% + 3 conns: seed=65%, clamped to 60-80% + 4+ conns: seed=85%, clamped to 80-95% + + Latency ranges in milliseconds (see LAT_RANGES): + 0 conns: seed=0.4ms, clamped to 0.1-0.8ms + 1 conn: seed=1.4ms, clamped to 1.0-1.8ms + 2 conns: seed=2.4ms, clamped to 2.0-2.8ms + 3 conns: seed=3.4ms, clamped to 3.0-3.8ms + 4+ conns: seed=4.4ms, clamped to 4.0-4.8ms + + Bandwidth varies by ±1% and latency by ±5% between consecutive samples for temporal continuity.
+ """ + connection_count : int = field(default = 0) + link_capacity : float = field(default = 100.0) + prev_bw : Optional[float] = field(default = None) + prev_latency : Optional[float] = field(default = None) + + # Connection count to (seed, min, max) mapping: bandwidth in percent + # Latency ranges are defined explicitly in LAT_RANGES, in milliseconds + BW_RANGES = { + 0: (3, 5, 10), + 1: (25, 15, 30), + 2: (40, 35, 50), + 3: (65, 60, 80), + 4: (85, 80, 95), + } + LAT_RANGES = { + 0: (0.4, 0.1, 0.8), + 1: (1.4, 1.0, 1.8), + 2: (2.4, 2.0, 2.8), + 3: (3.4, 3.0, 3.8), + 4: (4.4, 4.0, 4.8), + } @classmethod def create_random( - cls, amplitude_scale : float, phase_scale : float, period_scale : float, - offset_scale : float, noise_ratio : float, - min_value : Optional[float] = None, max_value : Optional[float] = None + cls, + connection_count : int = 0, + link_capacity : float = 100.0 ) -> 'SyntheticSampler': - amplitude = amplitude_scale * random.random() - phase = phase_scale * random.random() - period = period_scale * random.random() - offset = offset_scale * random.random() + amplitude - if min_value is None: min_value = -sys.float_info.max - if max_value is None: max_value = sys.float_info.max - return cls(amplitude, phase, period, offset, noise_ratio, min_value, max_value) + """Factory method kept for API compatibility with the previous waveform-based sampler.""" + return cls(connection_count=connection_count, link_capacity=link_capacity) - def get_sample(self) -> Sample: - timestamp = datetime.timestamp(datetime.utcnow()) + def get_sample(self) -> Tuple[Sample, Sample]: + """Generate bandwidth and latency samples with temporal continuity. + + Returns: + Tuple of (bandwidth_sample, latency_sample) + """ + timestamp = datetime.now().timestamp() + conn_key = min(self.connection_count, 4) - waveform = math.sin(2 * math.pi * timestamp / self.period + self.phase) - waveform *= self.amplitude - waveform += self.offset - - noise = self.amplitude * random.random() - value = abs((1.0 - self.noise_ratio) * waveform + self.noise_ratio * noise) - - value = max(value, self.min_value) - value = min(value, self.max_value) - - return Sample(timestamp, 0, value) + avg, min_bw, max_bw = self.BW_RANGES[conn_key] + if self.prev_bw is None: + bw_utilization = avg + else: + noise_factor = random.uniform(-0.01, 0.01) # ±1% noise for bandwidth + bw_utilization = self.prev_bw * (1.0 + noise_factor) + + bw_utilization = max(min_bw, min(max_bw, bw_utilization)) + self.prev_bw = bw_utilization + + avg_lat, min_lat, max_lat = self.LAT_RANGES[conn_key] + if self.prev_latency is None: + latency = avg_lat + else: + noise_factor = random.uniform(-0.05, 0.05) # ±5% noise for latency + latency = self.prev_latency * (1.0 + noise_factor) + + latency = max(min_lat, min(max_lat, latency)) + self.prev_latency = latency + + # actual_bw_utilization = (bw_utilization / 100.0) * self.link_capacity + + return (Sample(timestamp, 0, bw_utilization), Sample(timestamp, 0, latency)) class SyntheticSamplers: @@ -66,22 +111,27 @@ self._samplers : Dict[str, SyntheticSampler] = dict() def add_sampler( - self, sampler_name : str, amplitude_scale : float, phase_scale : float, - period_scale : float, offset_scale : float, noise_ratio : float + self, sampler_name : str, + connection_count : int = 0, + link_capacity : float = 100.0 ) -> None: with self._lock: if sampler_name in self._samplers: MSG = 'SyntheticSampler({:s}) already exists' raise Exception(MSG.format(sampler_name)) self._samplers[sampler_name] = SyntheticSampler.create_random( - amplitude_scale, 
phase_scale, period_scale, offset_scale, noise_ratio + connection_count=connection_count, + link_capacity=link_capacity ) def remove_sampler(self, sampler_name : str) -> None: with self._lock: self._samplers.pop(sampler_name, None) - def get_sample(self, sampler_name : str) -> Sample: + def get_sample(self, sampler_name : str) -> Tuple[Sample, Sample]: + """Get both bandwidth and latency samples. + Returns: Tuple of (bandwidth_sample, latency_sample) + """ with self._lock: sampler = self._samplers.get(sampler_name) if sampler_name not in self._samplers: diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json index 786a6df35d4a1623311a40c7357b77b25a07e2b7..ef050bbacab0a19b7909c783cf9634acb3b46633 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json @@ -115,4 +115,4 @@ } } ] -} \ No newline at end of file +} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json index f0875e25ea758d6c43866410dcbb720644da1aed..2bc13b12e3fc7ccc785f94a729144c66659e824c 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json @@ -115,4 +115,4 @@ } } ] -} \ No newline at end of file +} diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy.sh b/src/tests/ecoc25-f5ga-telemetry/deploy.sh index 4bdf8715d9826b9d609c2716d569fd9b47226065..66a6f6ffbe370f1a22aa27ac374d4515da899c5d 100755 --- a/src/tests/ecoc25-f5ga-telemetry/deploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy.sh @@ -14,7 +14,7 @@ # limitations under the License. -# Assuming the instances are named as: simap-server, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl # Get the current hostname HOSTNAME=$(hostname) @@ -22,10 +22,10 @@ echo "Deploying in ${HOSTNAME}..." case "$HOSTNAME" in - simap-server) - echo "Building SIMAP Server..." + simap-datastore) + echo "Building SIMAP DataStore..." cd ~/tfs-ctrl/ - docker buildx build -t simap-server:mock -f ./src/tests/tools/simap_server/Dockerfile . + docker buildx build -t simap-datastore:mock -f ./src/tests/tools/simap_datastore/Dockerfile . echo "Building NCE-FAN Controller..." cd ~/tfs-ctrl/ @@ -40,13 +40,13 @@ case "$HOSTNAME" in docker buildx build -t traffic-changer:mock -f ./src/tests/tools/traffic_changer/Dockerfile . echo "Cleaning up..." - docker rm --force simap-server + docker rm --force simap-datastore docker rm --force nce-fan-ctrl docker rm --force nce-t-ctrl docker rm --force traffic-changer echo "Deploying support services..." 
- docker run --detach --name simap-server --publish 8080:8080 simap-server:mock + docker run --detach --name simap-datastore --publish 8080:8080 simap-datastore:mock docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock docker run --detach --name traffic-changer --publish 8083:8080 traffic-changer:mock diff --git a/src/tests/ecoc25-f5ga-telemetry/destroy.sh b/src/tests/ecoc25-f5ga-telemetry/destroy.sh index 47977562d859ecc85c3a56eebe483d3843769dd9..52cbd1353bfe1813958fc25192bb14cce175efb0 100755 --- a/src/tests/ecoc25-f5ga-telemetry/destroy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/destroy.sh @@ -14,7 +14,7 @@ # limitations under the License. -# Assuming the instances are named as: simap-server, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl # Get the current hostname HOSTNAME=$(hostname) @@ -22,9 +22,9 @@ echo "Destroying in ${HOSTNAME}..." case "$HOSTNAME" in - simap-server) + simap-datastore) echo "Cleaning up..." - docker rm --force simap-server + docker rm --force simap-datastore docker rm --force nce-fan-ctrl docker rm --force nce-t-ctrl docker rm --force traffic-changer diff --git a/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh new file mode 100755 index 0000000000000000000000000000000000000000..62d3c587d655000cd0cb0cb83aed94d4192d4ba7 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Set working directory +cd "$(dirname "$0")" || exit 1 + +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl + +# Get the current hostname +HOSTNAME=$(hostname) +echo "Collecting logs for ${HOSTNAME}..." + +rm -rf logs tmp/exec +mkdir -p tmp/exec + +case "$HOSTNAME" in + simap-datastore) + echo "Collecting Docker container logs..." + docker logs simap-datastore > tmp/exec/simap-datastore.log 2>&1 + docker logs nce-fan-ctrl > tmp/exec/nce-fan-ctrl.log 2>&1 + docker logs nce-t-ctrl > tmp/exec/nce-t-ctrl.log 2>&1 + docker logs traffic-changer > tmp/exec/traffic-changer.log 2>&1 + ;; + tfs-e2e-ctrl) + echo "Collecting TFS E2E Controller logs..." 
+ kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/e2e-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/e2e-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/e2e-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/e2e-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/e2e-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/e2e-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/e2e-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/e2e-simap-connector.log + ;; + tfs-agg-ctrl) + echo "Collecting TFS Aggregation Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/agg-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/agg-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/agg-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/agg-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/agg-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/agg-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/agg-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/agg-simap-connector.log + ;; + tfs-ip-ctrl) + echo "Collecting TFS IP Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/ip-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/ip-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/ip-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/ip-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/ip-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/ip-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/ip-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/ip-simap-connector.log + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "No logs to collect." + ;; +esac + +printf "\n" + +echo "Done!" 
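Review note: this change set threads HTTP Basic Auth through every stream/poll request. CollectorWorker above gains a hardcoded `AUTH = HTTPBasicAuth('admin', 'admin')`, and the hunk below applies the same fix to the demo subscriber. A minimal sketch for manually verifying an endpoint against the new credentials; the host, port and path are illustrative placeholders, and `admin`/`admin` simply mirrors the hardcoded demo credentials:

```bash
# Sketch for manual verification; host, port and path are illustrative placeholders.
STREAM_URL="http://127.0.0.1:8080/restconf/streams/yang-push-json"

# Without credentials: expect 401/403 if the server now enforces Basic Auth.
curl -s -o /dev/null -w '%{http_code}\n' "${STREAM_URL}"

# With the demo credentials: -N disables buffering so notifications print as they arrive.
curl -N -u admin:admin "${STREAM_URL}"
```

Hardcoding `admin`/`admin` in CollectorWorker is acceptable for the demo, but it would be worth moving the credentials to configuration before this leaves the testbed.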
diff --git a/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py b/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py index 86ee09dab9f2a76f41b710704a38567694a01fe7..559556829ba75396d865f05aae6bba40c71cb477 100644 --- a/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py +++ b/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py @@ -64,7 +64,7 @@ def main() -> None: stream_url = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, subscription_uri) print('Opening stream "{:s}" (press Ctrl+C to stop)...'.format(stream_url)) - with requests.get(stream_url, stream=True) as resp: + with requests.get(stream_url, stream=True, auth=auth) as resp: for line in resp.iter_lines(decode_unicode=True): print(line) diff --git a/src/tests/mwc26-f5ga/data/slices/l3vpn_request_from_agg.json b/src/tests/mwc26-f5ga/data/slices/l3vpn_request_from_agg.json new file mode 100644 index 0000000000000000000000000000000000000000..ba9c9d853638f38442d21c44b02fec183d46df61 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/l3vpn_request_from_agg.json @@ -0,0 +1,185 @@ +{ + "ietf-l3vpn-svc:l3vpn-svc": { + "sites": { + "site": [ + { + "devices": { + "device": [ + { + "device-id": "P-PE1", + "location": "access" + } + ] + }, + "locations": { + "location": [ + { + "location-id": "access" + } + ] + }, + "management": { + "type": "ietf-l3vpn-svc:provider-managed" + }, + "routing-protocols": { + "routing-protocol": [ + { + "static": { + "cascaded-lan-prefixes": { + "ipv4-lan-prefixes": [ + { + "lan": "172.1.101.22/24", + "lan-tag": "21", + "next-hop": "128.32.44.254" + } + ] + } + }, + "type": "ietf-l3vpn-svc:static" + } + ] + }, + "site-id": "site_access", + "site-network-accesses": { + "site-network-access": [ + { + "device-reference": "P-PE1", + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "customer-address": "128.32.44.254", + "prefix-length": "24", + "provider-address": "128.32.44.254" + } + } + }, + "service": { + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "bandwidth": { + "guaranteed-bw-percent": 100 + }, + "class-id": "qos-realtime", + "direction": "ietf-l3vpn-svc:both", + "latency": { + "latency-boundary": 20 + } + } + ] + } + } + }, + "svc-input-bandwidth": 1000000000, + "svc-mtu": 1500, + "svc-output-bandwidth": 5000000000 + }, + "site-network-access-id": "200", + "site-network-access-type": "ietf-l3vpn-svc:multipoint", + "vpn-attachment": { + "site-role": "ietf-l3vpn-svc:hub-role", + "vpn-id": "slice25" + } + } + ] + } + }, + { + "devices": { + "device": [ + { + "device-id": "P-PE2", + "location": "cloud" + } + ] + }, + "locations": { + "location": [ + { + "location-id": "cloud" + } + ] + }, + "management": { + "type": "ietf-l3vpn-svc:provider-managed" + }, + "routing-protocols": { + "routing-protocol": [ + { + "static": { + "cascaded-lan-prefixes": { + "ipv4-lan-prefixes": [ + { + "lan": "172.16.104.221/24", + "lan-tag": "201", + "next-hop": "172.10.44.254" + } + ] + } + }, + "type": "ietf-l3vpn-svc:static" + } + ] + }, + "site-id": "site_cloud", + "site-network-accesses": { + "site-network-access": [ + { + "device-reference": "P-PE2", + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "customer-address": "172.10.44.254", + "prefix-length": "24", + "provider-address": "172.10.44.254" + } + } + }, + "service": { + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "bandwidth": { + "guaranteed-bw-percent": 
100 + }, + "class-id": "qos-realtime", + "direction": "ietf-l3vpn-svc:both", + "latency": { + "latency-boundary": 10 + } + } + ] + } + } + }, + "svc-input-bandwidth": 5000000000, + "svc-mtu": 1500, + "svc-output-bandwidth": 1000000000 + }, + "site-network-access-id": "200", + "site-network-access-type": "ietf-l3vpn-svc:multipoint", + "vpn-attachment": { + "site-role": "ietf-l3vpn-svc:spoke-role", + "vpn-id": "slice25" + } + } + ] + } + } + ] + }, + "vpn-services": { + "vpn-service": [ + { + "vpn-id": "slice25" + } + ] + } + } +} diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json new file mode 100644 index 0000000000000000000000000000000000000000..fd63bbabb1e51d31fc27fd5f964c005249c52b2a --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "initial_background_slice_1", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": 
"milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice2_game_creation.json b/src/tests/mwc26-f5ga/data/slices/network-slice2_game_creation.json new file mode 100644 index 0000000000000000000000000000000000000000..de69d29f92d8c7eeb1027a9deb0d61b781edb688 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice2_game_creation.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "game_slice_on_ip_transport", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": 
"ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json new file mode 100644 index 0000000000000000000000000000000000000000..e76be75697b67c288aac35ae9e3d94f5bee029a1 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "another_background_slice_3", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of 
file diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice4_optical.json b/src/tests/mwc26-f5ga/data/slices/network-slice4_optical.json new file mode 100644 index 0000000000000000000000000000000000000000..54fd5b2a01c1afa35c04d4d4f7890e5fb5e29f62 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice4_optical.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "game_slice_on_optical_transport", + "description": "network slice 2, PC1-VM2 - using optical transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["31"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.201.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line2" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP1", + "sdp-ip-address": ["172.16.204.220"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.201.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line2" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP1 to VM2", + "description": "AC POP1 connected to VM2", + "ac-node-id": "POP1", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line2", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "7000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "4000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] + } + } + ] +} diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice5_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice5_background.json new file mode 100644 index 
0000000000000000000000000000000000000000..2b4c1999acee3d0840f93e0b9322b630c3f235aa --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice5_background.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "another_background_slice_5", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/data/telemetry/subscription-slice1.json b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice1.json new file mode 100644 index 0000000000000000000000000000000000000000..3a2c4b96c8daaa282999ccc68916d77124f1294a --- /dev/null +++ b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice1.json @@ -0,0 +1,9 @@ +{ + 
"ietf-subscribed-notifications:input": { + "datastore": "operational", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e/ietf-network-topology:link=E2E-L1/simap-telemetry", + "ietf-yang-push:periodic": { + "ietf-yang-push:period": 10 + } + } +} diff --git a/src/tests/mwc26-f5ga/data/telemetry/subscription-slice2.json b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice2.json new file mode 100644 index 0000000000000000000000000000000000000000..cd0954ac1f95f99fd8a4d4174819fdb9edca99e8 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice2.json @@ -0,0 +1,9 @@ +{ + "ietf-subscribed-notifications:input": { + "datastore": "operational", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e/ietf-network-topology:link=E2E-L2/simap-telemetry", + "ietf-yang-push:periodic": { + "ietf-yang-push:period": 10 + } + } +} diff --git a/src/tests/mwc26-f5ga/data/topology/topology-agg.json b/src/tests/mwc26-f5ga/data/topology/topology-agg.json new file mode 100644 index 0000000000000000000000000000000000000000..c761a86dd4bfc3865b03a70d5f4fb86d291a283b --- /dev/null +++ b/src/tests/mwc26-f5ga/data/topology/topology-agg.json @@ -0,0 +1,95 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "agg"}}} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.12"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "device_type": "nce", + "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8082"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "POP1"}}, "device_type": "packet-pop", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.204.220"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "201", "name": "201", "type": "copper"}, + {"uuid": "500", 
"name": "500", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.1.201.1", "address_prefix": "24", + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[201]", "resource_value": { + "uuid": "201", "name": "201", "type": "optical", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "172.10.44.2", "address_prefix": "24", "vlan_tag": 101, + "site_location": "transport", "mtu": "1500" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "POP2"}}, "device_type": "packet-pop", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.204.221"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "201", "name": "201", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.1.101.1", "address_prefix": "24", + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[201]", "resource_value": { + "uuid": "201", "name": "201", "type": "optical", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "172.10.44.2", "address_prefix": "24", "vlan_tag": 201, + "site_location": "transport", "mtu": "1500" + }}} + ]}} + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "L13"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "POP2" }}, "endpoint_uuid": {"uuid": "500"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "L14"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "O-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "POP1" }}, "endpoint_uuid": {"uuid": "500"}} + ]} + ] +} diff --git a/src/tests/mwc26-f5ga/data/topology/topology-e2e.json b/src/tests/mwc26-f5ga/data/topology/topology-e2e.json new file mode 100644 index 0000000000000000000000000000000000000000..117e97e61881da30a29027860f3927d9f98a88ab --- /dev/null +++ b/src/tests/mwc26-f5ga/data/topology/topology-e2e.json @@ -0,0 +1,43 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, 
"topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "e2e"}}} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_SLICE"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.11"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "device_type": "nce", + "device_drivers": ["DEVICEDRIVER_NCE"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8081" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}} + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "L3"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "L4"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "O-PE1"}}, "endpoint_uuid": {"uuid": "200"}} + ]} + ] +} diff --git a/src/tests/mwc26-f5ga/data/topology/topology-ip.json b/src/tests/mwc26-f5ga/data/topology/topology-ip.json new file mode 100644 index 0000000000000000000000000000000000000000..cd772016009c21a7a36b8607d158845f9d1a5db2 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/topology/topology-ip.json @@ -0,0 +1,149 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "trans-pkt"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "P-PE1"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.122.25"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": 
"500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "128.32.44.254", "address_prefix": "24", "vlan_tag": 21, + "site_location": "access", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.1.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.2.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "P-P1"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.31"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.1.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.3.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "P-P2"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.33"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.2.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.4.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "P-PE2"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": 
"_connect/address", "resource_value": "172.16.125.32"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.10.44.254", "address_prefix": "24", "vlan_tag": 201, + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.3.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.4.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "L5"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "P-P1" }}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "L6"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-P2" }}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "L9"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-P1" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "L10"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-P2" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "501"}} + ] + } + ] +} diff --git a/src/tests/mwc26-f5ga/deploy-specs-agg.sh b/src/tests/mwc26-f5ga/deploy-specs-agg.sh new file mode 100644 index 0000000000000000000000000000000000000000..c7b5e98b50ebf7e057b36a6d7b0433b0c0e85a7e --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy-specs-agg.sh @@ -0,0 +1,220 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. 
+export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/mwc26-f5ga/deploy-specs-e2e.sh b/src/tests/mwc26-f5ga/deploy-specs-e2e.sh new file mode 100644 index 0000000000000000000000000000000000000000..c7b5e98b50ebf7e057b36a6d7b0433b0c0e85a7e --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy-specs-e2e.sh @@ -0,0 +1,220 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. 
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/mwc26-f5ga/deploy-specs-ip.sh b/src/tests/mwc26-f5ga/deploy-specs-ip.sh new file mode 100644 index 0000000000000000000000000000000000000000..c02dac122fb3dd8cbda547be25f268920cc4e5e5 --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy-specs-ip.sh @@ -0,0 +1,220 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. 
+export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/mwc26-f5ga/deploy.sh b/src/tests/mwc26-f5ga/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..4cda867d5d3d7ec614c99203cffa91e670311c85 --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy.sh @@ -0,0 +1,111 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/../../../.." && pwd)" + +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl + +# Get the current hostname +HOSTNAME=$(hostname) +echo "Deploying in ${HOSTNAME}..." + +case "$HOSTNAME" in + simap-datastore) + echo "Building SIMAP DataStore..." + cd "${REPO_ROOT}" + docker buildx build -t simap-datastore:mock -f ./src/tests/tools/simap_datastore/Dockerfile . + + echo "Building NCE-FAN Controller..." + docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . + + echo "Building NCE-T Controller..." + docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . + + echo "Building AI Engine..." + docker buildx build -t ai-engine:latest -f ./src/tests/tools/simap_ai_engine/ai_engine/Dockerfile . + + echo "Cleaning up..." + docker rm --force simap-datastore + docker rm --force nce-fan-ctrl + docker rm --force nce-t-ctrl + docker rm --force ai-engine + + echo "Deploying support services..." 
+ docker run --detach --name simap-datastore --publish 8080:8080 \ + -e INFLUXDB_HOST=10.254.0.9 \ + -e INFLUXDB_PORT=8181 \ + simap-datastore:mock + + docker run --detach --name nce-fan-ctrl --publish 8081:8080 \ + --env SIMAP_ADDRESS=10.254.0.9 \ + --env SIMAP_PORT=8080 \ + nce-fan-ctrl:mock + docker run --detach --name nce-t-ctrl --publish 8082:8080 \ + --env SIMAP_ADDRESS=10.254.0.9 \ + --env SIMAP_PORT=8080 \ + nce-t-ctrl:mock + + echo "Deploying AI Engine..." + docker run --detach --name ai-engine --publish 8084:8080 \ + --env SIMAP_DATASTORE_ADDRESS=10.254.0.9 \ + --env SIMAP_DATASTORE_PORT=8181 \ + --env SIMAP_DATASTORE_USERNAME=admin \ + --env SIMAP_DATASTORE_PASSWORD=admin \ + ai-engine:latest + # NOTE: If testing, run the client (src/tests/tools/simap_server/run_client.sh) to manually populate the SIMAP Server with telemetry data. + + sleep 2 + docker ps -a + ;;
+ tfs-e2e-ctrl) + echo "Deploying TFS E2E Controller..." + sed -i 's|\(ETSI TeraFlowSDN Controller\)|\1 (End-to-End)|' src/webui/service/templates/main/home.html + source ~/tfs-ctrl/src/tests/mwc26-f5ga/deploy-specs-e2e.sh + ./deploy/all.sh + + echo "Waiting for NATS connection..." + while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + ;;
+ tfs-agg-ctrl) + echo "Deploying TFS Agg Controller..." + sed -i 's|\(ETSI TeraFlowSDN Controller\)|\1 (Aggregation)|' src/webui/service/templates/main/home.html + source ~/tfs-ctrl/src/tests/mwc26-f5ga/deploy-specs-agg.sh + ./deploy/all.sh + + echo "Waiting for NATS connection..." + while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + ;;
+ tfs-ip-ctrl) + echo "Deploying TFS IP Controller..." + sed -i 's|\(ETSI TeraFlowSDN Controller\)|\1 (IP)|' src/webui/service/templates/main/home.html + source ~/tfs-ctrl/src/tests/mwc26-f5ga/deploy-specs-ip.sh + ./deploy/all.sh + + echo "Waiting for NATS connection..." + while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "No commands to run." + ;; +esac + +echo "Deployment complete."
diff --git a/src/tests/mwc26-f5ga/destroy.sh b/src/tests/mwc26-f5ga/destroy.sh new file mode 100755 index 0000000000000000000000000000000000000000..6da3f984029846aa91bc9000e0e3f55a1644974d --- /dev/null +++ b/src/tests/mwc26-f5ga/destroy.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +echo "Cleaning up..." +docker rm --force simap-datastore 2>/dev/null || true +docker rm --force nce-fan-ctrl 2>/dev/null || true +docker rm --force nce-t-ctrl 2>/dev/null || true +docker rm --force ai-engine 2>/dev/null || true +docker rm --force traffic-changer 2>/dev/null || true +sleep 2 +docker ps -a
diff --git a/src/tests/mwc26-f5ga/dummy_L3VPN_delete.sh b/src/tests/mwc26-f5ga/dummy_L3VPN_delete.sh new file mode 100755 index 0000000000000000000000000000000000000000..d5e199e0ca9176d0e513977de376a1a75500ee45 --- /dev/null +++ b/src/tests/mwc26-f5ga/dummy_L3VPN_delete.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ------------- +# For direct testing of the L3VPN delete on the IP-Controller, without the need to trigger it from the AGG-Controller. +# This dummy script replicates the behavior of the AGG-Controller when it sends a delete request to the IP-Controller. +# -------------- + +cd $(dirname $0) + +echo "[IP-Controller] sending L3VPN delete (dummy replicating the AGG-Controller)..." +curl --request DELETE --user admin:admin --location \ + http://10.254.0.12:80/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service=slice25 + +echo + +echo "Done!"
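A quick way to verify the dummy delete took effect is to read the vpn-services collection back from the same NBI. This is a minimal sketch, assuming the IP-Controller NBI at 10.254.0.12 also serves RESTCONF GET on this path with the same admin/admin basic-auth credentials:

    # Read back the remaining L3VPN services; 'slice25' should be gone after the delete.
    curl --request GET --user admin:admin --location \
        --header 'Accept: application/json' \
        http://10.254.0.12:80/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services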
diff --git a/src/tests/mwc26-f5ga/dummy_L3VPN_request.sh b/src/tests/mwc26-f5ga/dummy_L3VPN_request.sh new file mode 100755 index 0000000000000000000000000000000000000000..c195fe34f4bd3cfe4cb97d787ac6288e60890292 --- /dev/null +++ b/src/tests/mwc26-f5ga/dummy_L3VPN_request.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ------------- +# For direct testing of the L3VPN request on the IP-Controller, without the need to trigger it from the AGG-Controller. +# This dummy script replicates the behavior of the AGG-Controller when it sends a request to the IP-Controller. +# -------------- + +cd $(dirname $0) + +echo "[IP-Controller] sending L3VPN request (dummy replicating the AGG-Controller request)..." +curl --request POST --location --user admin:admin --header 'Content-Type: application/json' \ + --data @data/slices/l3vpn_request_from_agg.json \ + http://127.0.0.1:80/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/dump-logs.sh b/src/tests/mwc26-f5ga/dump-logs.sh new file mode 100755 index 0000000000000000000000000000000000000000..391307fd93e8a330c9bf45667c94f162187294b1 --- /dev/null +++ b/src/tests/mwc26-f5ga/dump-logs.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Set working directory +cd "$(dirname "$0")" || exit 1 + +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl + +# Get the current hostname
+HOSTNAME=$(hostname) +echo "Collecting logs for ${HOSTNAME}..." + +rm -rf logs tmp/exec +mkdir -p tmp/exec + +case "$HOSTNAME" in + simap-datastore) + echo "Collecting Docker container logs..." + docker logs simap-datastore > tmp/exec/simap-datastore.log 2>&1 + docker logs nce-fan-ctrl > tmp/exec/nce-fan-ctrl.log 2>&1 + docker logs nce-t-ctrl > tmp/exec/nce-t-ctrl.log 2>&1 + docker logs ai-engine > tmp/exec/ai-engine.log 2>&1 + ;; + tfs-e2e-ctrl) + echo "Collecting TFS E2E Controller logs..."
+ kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/e2e-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/e2e-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/e2e-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/e2e-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/e2e-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/e2e-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/e2e-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/e2e-simap-connector.log + ;; + tfs-agg-ctrl) + echo "Collecting TFS Aggregation Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/agg-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/agg-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/agg-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/agg-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/agg-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/agg-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/agg-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/agg-simap-connector.log + ;; + tfs-ip-ctrl) + echo "Collecting TFS IP Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/ip-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/ip-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/ip-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/ip-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/ip-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/ip-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/ip-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/ip-simap-connector.log + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "No logs to collect." + ;; +esac + +printf "\n" + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/provision-slice1_background.sh b/src/tests/mwc26-f5ga/provision-slice1_background.sh new file mode 100755 index 0000000000000000000000000000000000000000..1406f7c3cc1b374e29534fb655476ac037e75eb3 --- /dev/null +++ b/src/tests/mwc26-f5ga/provision-slice1_background.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice1..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice1_background.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/provision-slice2_game.sh b/src/tests/mwc26-f5ga/provision-slice2_game.sh new file mode 100755 index 0000000000000000000000000000000000000000..cde5607ba997717c903494d15da8a7e6ae1e23dd --- /dev/null +++ b/src/tests/mwc26-f5ga/provision-slice2_game.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice2..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice2_game_creation.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/provision-slice3_another_background.sh b/src/tests/mwc26-f5ga/provision-slice3_another_background.sh new file mode 100755 index 0000000000000000000000000000000000000000..18162934947b52ac91f46f33b56c40edb63b3076 --- /dev/null +++ b/src/tests/mwc26-f5ga/provision-slice3_another_background.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice3..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice3_background.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + + +echo "Done!" 
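The provisioning scripts above only POST the slice requests and print the raw reply. A minimal sketch for checking what the E2E controller actually stored, assuming its NBI also serves RESTCONF GET on the same network-slice-services subtree:

    # List the slice services currently known to the E2E controller.
    curl --request GET --location --header 'Accept: application/json' \
        http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services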
diff --git a/src/tests/mwc26-f5ga/provision-slice4_optical.sh b/src/tests/mwc26-f5ga/provision-slice4_optical.sh new file mode 100755 index 0000000000000000000000000000000000000000..1973f6b2d9a92dbe26fbd5e35f880c2f1b7fd0fe --- /dev/null +++ b/src/tests/mwc26-f5ga/provision-slice4_optical.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice4..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice4_optical.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/provision-slice5_another_background.sh b/src/tests/mwc26-f5ga/provision-slice5_another_background.sh new file mode 100755 index 0000000000000000000000000000000000000000..a186cf531c18bb503345483ada47e7dc9acb3f94 --- /dev/null +++ b/src/tests/mwc26-f5ga/provision-slice5_another_background.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice5..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice5_background.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/run_telemetry-subscribe.sh b/src/tests/mwc26-f5ga/run_telemetry-subscribe.sh new file mode 100755 index 0000000000000000000000000000000000000000..853e269e9b965c2a92ce99f2e0c924a3d4c2616e --- /dev/null +++ b/src/tests/mwc26-f5ga/run_telemetry-subscribe.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Set working directory +cd "$(dirname "$0")" || exit 1 + +# Get the current hostname +HOSTNAME=$(hostname) +echo "Starting telemetry subscription for ${HOSTNAME}..." + +case "$HOSTNAME" in + tfs-e2e-ctrl) + echo "Subscribing to E2E Controller telemetry..." + python3 telemetry-subscribe-slice1.py e2e E2E-L1 + ;; + tfs-agg-ctrl) + echo "Subscribing to Aggregation Controller telemetry..." + python3 telemetry-subscribe-slice1.py agg AggNet-L1 + ;; + tfs-ip-ctrl) + echo "Subscribing to IP Controller telemetry..." + python3 telemetry-subscribe-slice1.py trans-pkt Trans-L1 + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "Usage: $0" + echo " This script must be run on tfs-e2e-ctrl, tfs-agg-ctrl, or tfs-ip-ctrl" + exit 1 + ;; +esac diff --git a/src/tests/mwc26-f5ga/teardown-slice1_background.sh b/src/tests/mwc26-f5ga/teardown-slice1_background.sh new file mode 100755 index 0000000000000000000000000000000000000000..1b4f1b994ff9eae94a687763218147c424149076 --- /dev/null +++ b/src/tests/mwc26-f5ga/teardown-slice1_background.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice1..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=initial_background_slice_1 +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/teardown-slice2_game.sh b/src/tests/mwc26-f5ga/teardown-slice2_game.sh new file mode 100755 index 0000000000000000000000000000000000000000..d136e79b0a90ed0a2ce9d0fe9cf1ec5ea25c5f11 --- /dev/null +++ b/src/tests/mwc26-f5ga/teardown-slice2_game.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice2..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=game_slice_on_ip_transport +echo + + +echo "Done!"
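Each slice has its own teardown script (slice3, slice4, and slice5 follow below); when resetting the whole demo it can be convenient to run them in one pass. A minimal sketch, assuming all five teardown-slice*.sh scripts sit in this folder and their slices were provisioned beforehand:

    # Tear down every demo slice in lexicographic order (slice1 .. slice5).
    cd "$(dirname "$0")"
    for script in teardown-slice*.sh; do
        "./${script}"
    done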
diff --git a/src/tests/mwc26-f5ga/teardown-slice3_another_background.sh b/src/tests/mwc26-f5ga/teardown-slice3_another_background.sh new file mode 100755 index 0000000000000000000000000000000000000000..85fdcdabffbc97899aeae2f00a1aac0e2ac44242 --- /dev/null +++ b/src/tests/mwc26-f5ga/teardown-slice3_another_background.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice3..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=another_background_slice_3 +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/teardown-slice4_optical.sh b/src/tests/mwc26-f5ga/teardown-slice4_optical.sh new file mode 100755 index 0000000000000000000000000000000000000000..c45a11be155f4e8f406bcf72568bb838381acbea --- /dev/null +++ b/src/tests/mwc26-f5ga/teardown-slice4_optical.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice4..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=game_slice_on_optical_transport +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/teardown-slice5_another_background.sh b/src/tests/mwc26-f5ga/teardown-slice5_another_background.sh new file mode 100755 index 0000000000000000000000000000000000000000..a8879dd10690419c0afddc8f51bd09481164bd04 --- /dev/null +++ b/src/tests/mwc26-f5ga/teardown-slice5_another_background.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice5..."
+curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=another_background_slice_5 +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/telemetry-delete-slice1.py b/src/tests/mwc26-f5ga/telemetry-delete-slice1.py new file mode 100644 index 0000000000000000000000000000000000000000..b2924e1b201407b25e8661c946b0053dc1dac7ab --- /dev/null +++ b/src/tests/mwc26-f5ga/telemetry-delete-slice1.py @@ -0,0 +1,46 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import requests +from requests.auth import HTTPBasicAuth + + +RESTCONF_ADDRESS = '127.0.0.1' +RESTCONF_PORT = 80 +# Identifier of the subscription to remove, as returned by the establish-subscription RPC +# (see telemetry-subscribe-slice1.py); update it to match the subscription to be deleted. +TELEMETRY_ID = 1109405947767160833 + +UNSUBSCRIBE_URI = '/restconf/operations/subscriptions:delete-subscription' +UNSUBSCRIBE_URL = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, UNSUBSCRIBE_URI) +REQUEST = { + 'ietf-subscribed-notifications:input': { + 'id': TELEMETRY_ID, + } +} + + +def main() -> None: + print('[E2E] Delete Telemetry slice1...') + headers = {'accept': 'application/json'} + auth = HTTPBasicAuth('admin', 'admin') + print(UNSUBSCRIBE_URL) + print(REQUEST) + reply = requests.post( + UNSUBSCRIBE_URL, headers=headers, json=REQUEST, auth=auth, + verify=False, allow_redirects=True, timeout=30 + ) + reply.raise_for_status() + +if __name__ == '__main__': + main() diff --git a/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py b/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py new file mode 100644 index 0000000000000000000000000000000000000000..92d23ad34cb80528d49f0db8c957cb68ec4eefdd --- /dev/null +++ b/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py @@ -0,0 +1,79 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ + +import sys +import requests +from requests.auth import HTTPBasicAuth + + +if len(sys.argv) < 3: + print('Usage: {:s} <simap-name> <link-name>'.format(sys.argv[0])) + print('Example: {:s} e2e E2E-L1'.format(sys.argv[0])) + sys.exit(1) + +RESTCONF_ADDRESS = '127.0.0.1' +RESTCONF_PORT = 80 +TARGET_SIMAP_NAME = sys.argv[1] +TARGET_LINK_NAME = sys.argv[2] +SAMPLING_INTERVAL = 10.0 + + +SUBSCRIBE_URI = '/restconf/operations/subscriptions:establish-subscription' +SUBSCRIBE_URL = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, SUBSCRIBE_URI) +XPATH_FILTER = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' +REQUEST = { + 'ietf-subscribed-notifications:input': { + 'datastore': 'operational', + 'ietf-yang-push:datastore-xpath-filter': XPATH_FILTER.format(TARGET_SIMAP_NAME, TARGET_LINK_NAME), + 'ietf-yang-push:periodic': { + 'ietf-yang-push:period': SAMPLING_INTERVAL + } + } +} + + +def main() -> None: + print('[{:s}] Subscribe Telemetry slice1 for link {:s}...'.format(TARGET_SIMAP_NAME.upper(), TARGET_LINK_NAME)) + headers = {'accept': 'application/json'} + auth = HTTPBasicAuth('admin', 'admin') + print(SUBSCRIBE_URL) + print(REQUEST) + reply = requests.post( + SUBSCRIBE_URL, headers=headers, json=REQUEST, auth=auth, + verify=False, allow_redirects=True, timeout=30 + ) + content_type = reply.headers.get('Content-Type', '') + if 'application/json' not in content_type: + raise Exception('Not JSON: {:s}'.format(reply.content.decode('UTF-8'))) + try: + reply_data = reply.json() + except ValueError as e: + str_error = 'Invalid JSON: {:s}'.format(reply.content.decode('UTF-8')) + raise Exception(str_error) from e + + if 'uri' not in reply_data: + raise Exception('Unexpected Reply: {:s}'.format(str(reply_data))) + subscription_uri = reply_data['uri'] + + stream_url = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, subscription_uri) + print('Opening stream "{:s}" (press Ctrl+C to stop)...'.format(stream_url)) + + with requests.get(stream_url, stream=True, auth=auth) as resp: + for i, line in enumerate(resp.iter_lines(decode_unicode=True), 1): + if i % 10 == 0: + print(line) + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring.json b/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring.json new file mode 100644 index 0000000000000000000000000000000000000000..f7adef9f354d7b19abdf0d11e631ca2af36e8a34 --- /dev/null +++ b/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring.json @@ -0,0 +1,1117 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "enable": true, + "hide": false, + "iconColor": "orange", + "name": "Service Upgrades", + "query": "SELECT * FROM telemetry_notifications WHERE status='UPGRADE'", + "tagsColumn": "status", + "target": { + "limit": 100, + "matchAny": false, + "tags": [ + { + "key": "status", + "operator": "=", + "value": "UPGRADE" + } + ], + "type": "tags" + }, + "textColumn": "timestamp", + "titleColumn": "" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "enable": true, + "hide": false, + "iconColor": "red", + "name": "Service
Downgrades", + "query": "SELECT * FROM telemetry_notifications WHERE status='DOWNGRADE'", + "tagsColumn": "status", + "target": { + "limit": 100, + "matchAny": false, + "tags": [ + { + "key": "status", + "operator": "=", + "value": "DOWNGRADE" + } + ], + "type": "tags" + }, + "textColumn": "timestamp", + "titleColumn": "" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": 0, + "links": [], + "panels": [ + { + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "# SIMAP Service-Level SLA Monitoring\n", + "mode": "markdown" + }, + "pluginVersion": "12.3.1", + "title": "Dashboard Overview", + "type": "text" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "cellOptions": { + "type": "auto" + }, + "footer": { + "reducers": [] + }, + "inspect": false + }, + "mappings": [ + { + "options": { + "DOWNGRADE": { + "color": "red", + "index": 1, + "text": "⬇️ DOWNGRADE" + }, + "UPGRADE": { + "color": "green", + "index": 0, + "text": "⬆️ UPGRADE" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.width", + "value": 180 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Status" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "type": "color-background" + } + } + ] + } + ] + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 11, + "options": { + "cellHeight": "sm", + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Time" + } + ] + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT status as \"Status\", timestamp as \"Timestamp Info\" FROM telemetry_notifications WHERE time > now() - $timerange ORDER BY time DESC LIMIT 50", + "rawQuery": true, + "refId": "A", + "resultFormat": "table" + } + ], + "title": "Service Status Change Events (UPGRADE/DOWNGRADE)", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": { + "Status": 1, + "Time": 0, + "Timestamp Info": 2 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 
0 + }, + { + "color": "yellow", + "value": 65 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/pred_.*/" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + }, + { + "id": "color", + "value": { + "fixedColor": "light-blue", + "mode": "fixed" + } + }, + { + "id": "custom.fillOpacity", + "value": 5 + } + ] + } + ] + }, + "gridPos": { + "h": 15, + "w": 12, + "x": 0, + "y": 6 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": true, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_bandwidth_utilization FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "Link Bandwidth Utilization (%) - Timeline", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "ms" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/pred_.*/" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + }, + { + "id": "color", + "value": { + "fixedColor": "light-blue", + "mode": "fixed" + } + }, + { + "id": "custom.fillOpacity", + "value": 5 + } + ] + } + ] + }, + "gridPos": { + "h": 15, + "w": 12, + "x": 12, + "y": 6 + }, + "id": 7, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": true, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + 
"refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_latency FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "Link Latency (ms) - Timeline", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "cellOptions": { + "type": "auto" + }, + "footer": { + "reducers": [] + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 5 + }, + { + "color": "red", + "value": 10 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Bandwidth Utilization (%)" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "mode": "gradient", + "type": "gauge" + } + }, + { + "id": "unit", + "value": "percent" + }, + { + "id": "max", + "value": 100 + }, + { + "id": "min", + "value": 0 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Latency (ms)" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "mode": "gradient", + "type": "color-background" + } + }, + { + "id": "unit", + "value": "ms" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 8, + "options": { + "cellHeight": "sm", + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Bandwidth Utilization (%)" + } + ] + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT LAST(bandwidth_utilization) as \"Bandwidth Utilization (%)\", LAST(latency) as \"Latency (ms)\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "table" + } + ], + "title": "Link Comparison Table - Current Status", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "link_id": "Link ID", + "network_id": "Network ID" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Bandwidth Utilization (%)", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + 
}, + "mappings": [], + "min": 1, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "link_telemetry.bandwidth_utilization {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Actual BW" + }, + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "predicted_telemetry.pred_bandwidth_utilization {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Predicted BW" + }, + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 31 + }, + "id": 12, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_bandwidth_utilization FROM predicted_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "E2E-L1: Bandwidth Comparison (Actual vs Predicted)", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Latency (ms)", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 1, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "ms" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "link_telemetry.latency {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Actual Latency" + }, + { + "id": "color", + "value": { + "fixedColor": "green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "predicted_telemetry.pred_latency {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Predicted Latency" + }, + { + "id": "color", + "value": { + "fixedColor": "orange", + "mode": "fixed" + } + }, + { + 
"id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 31 + }, + "id": 13, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_latency FROM predicted_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "E2E-L1: Latency Comparison (Actual vs Predicted)", + "type": "timeseries" + } + ], + "preload": false, + "refresh": "10s", + "schemaVersion": 42, + "tags": [ + "simap", + "telemetry", + "sla", + "service-monitoring" + ], + "templating": { + "list": [ + { + "current": { + "text": "influxdb-SIMAP-Server", + "value": "cf9ge11vhadj4b" + }, + "includeAll": false, + "label": "InfluxDB Datasource", + "name": "DS_INFLUXDB", + "options": [], + "query": "influxdb", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "label": "Time Range", + "name": "timerange", + "options": [ + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "20m", + "value": "20m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "3h", + "value": "3h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + } + ], + "query": "5m,10m,20m,1h,3h,6h,12h", + "refresh": 2, + "type": "interval" + }, + { + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", + "includeAll": true, + "label": "Network ID", + "multi": true, + "name": "network_id", + "options": [], + "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", + "includeAll": true, + "label": "Link ID", + "multi": true, + "name": "link_id", + "options": [], + "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m" + ] + }, + "timezone": "", + "title": "SIMAP 
V4 Service-Level SLA Monitoring", + "uid": "simap-sla-monitoring-v4", + "version": 7 +} \ No newline at end of file diff --git a/src/tests/tools/simap_ai_engine/ai_engine/__init__.py b/src/tests/tools/simap_ai_engine/ai_engine/__init__.py index 288ad6ef9510fda28274b144036855949e019253..3ccc21c7db78aac26daa1f8c5ff8e1ffd3f35460 100644 --- a/src/tests/tools/simap_ai_engine/ai_engine/__init__.py +++ b/src/tests/tools/simap_ai_engine/ai_engine/__init__.py @@ -12,43 +12,3 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -AI Engine module. - -Provides REST API for AI-driven SLA policy analysis and violation detection -using data from SIMAP (network topology/devices) and InfluxDB (telemetry metrics). - -Module Structure: - - ai_model: AI/ML processing logic and SLA policy definitions - - api: Flask REST API endpoints - - clients: External service clients (SIMAP, InfluxDB, Decision Engine) - - config: Configuration management - - tests: Test suite - -Public API: - - AIEngineAPI: Main orchestrator and Flask application - - AIModelProcessor: AI/ML analysis engine - - SLAPolicyConfig: SLA policy configuration data model - - SimapDataFetcher: SIMAP client for device/topology data - - InfluxDBFetcher: InfluxDB client for telemetry metrics - - DecisionEngineClient: Decision engine notification client - - create_ai_engine_blueprint: Flask blueprint factory -""" - -from .ai_model.ai_processor import AIModelProcessor -from .api.api_blueprint import create_ai_engine_blueprint -from .engine import AIEngineAPI -from .clients.decision_client import DecisionEngineClient -from .clients.influxdb_fetcher import InfluxDBFetcher -from .clients.simap_fetcher import SimapDataFetcher -from .ai_model.sla_policy import SLAPolicyConfig - -__all__ = [ - 'AIEngineAPI', - 'AIModelProcessor', - 'DecisionEngineClient', - 'InfluxDBFetcher', - 'SimapDataFetcher', - 'SLAPolicyConfig', - 'create_ai_engine_blueprint', -] diff --git a/src/tests/tools/simap_ai_engine/ai_engine/ai_model/ai_processor.py b/src/tests/tools/simap_ai_engine/ai_engine/ai_model/ai_processor.py index e9fa99ca2a0446254f956572889ae39bab4816cb..832d09d71d906dd9a4957cf441144450fdae75f5 100644 --- a/src/tests/tools/simap_ai_engine/ai_engine/ai_model/ai_processor.py +++ b/src/tests/tools/simap_ai_engine/ai_engine/ai_model/ai_processor.py @@ -20,13 +20,13 @@ Provides AI/ML processing functionality for SLA analysis. import logging from datetime import datetime, timezone from random import Random -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, TYPE_CHECKING -from numpy import average import pandas as pd from statsmodels.tsa.holtwinters import ExponentialSmoothing -from .sla_policy import SLAPolicyConfig +if TYPE_CHECKING: + from ..clients.influxdb_fetcher import InfluxDBFetcher LOGGER = logging.getLogger(__name__) @@ -40,86 +40,128 @@ class AIModelProcessor: generating recommendations. """ - def __init__(self) -> None: + def __init__(self, influx_fetcher: Optional['InfluxDBFetcher'] = None) -> None: """ Initialize the AIModelProcessor. + Args: + influx_fetcher: InfluxDBFetcher instance for writing predicted telemetry. + If None, predicted telemetry will not be written to DB. + Loads AI models and prepares the processor for data analysis. 
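+
+        Note: the fetcher is only used to persist forecasts via
+        write_predicted_telemetry(); the analysis itself still runs without it.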
""" LOGGER.info("AIModelProcessor initialized") + self._influx_fetcher = influx_fetcher # TODO: Load AI/ML models here # Example: self.model = load_model('sla_violation_detector.h5') def ai_model_processor( self, - metric_values: list[float] - ) -> Optional[list[float]]: + metric_values: list[dict[str, Any]], + ) -> Optional[list[dict[str, Any]]]: """ Process device and performance data through AI models. Args: - metric_values: List of performance metric values. + metric_values: List of dictionaries containing performance metric values. + Each dict has keys like 'bandwidth_utilization', 'latency', etc. Returns: - List of 3 forecasted values, or None if insufficient data. + List of dicts containing forecasted values for each metric, + or None if insufficient data. """ LOGGER.debug("Processing data through AI models") - # LOGGER.debug(f"Number of performance data points: {len(metric_values)}") + LOGGER.debug(f"Number of performance data points: {len(metric_values)}") - if not metric_values or len(metric_values) < 4: - LOGGER.warning("Insufficient data for forecasting") + if not metric_values or len(metric_values) < 3: + LOGGER.warning("Insufficient data for forecasting (need at least 3 samples)") return None + results = [] + try: - # Convert metric_values to pandas Series - data = pd.Series(metric_values) - - # Create and fit Exponential Smoothing model - model = ExponentialSmoothing( - data, - trend="add", - seasonal=None # No seasonal component for this data - ) - - fit = model.fit() - - # Forecast next 3 values - forecast = fit.forecast(steps=3) + # Convert list of dicts to DataFrame for easier processing + df = pd.DataFrame(metric_values) - forecasted_values = forecast.tolist() + # Process each metric column separately + for column in df.columns: + data = df[column] + + # Skip non-numeric columns + if not pd.api.types.is_numeric_dtype(data): + LOGGER.debug(f"Skipping non-numeric column: {column}") + continue + + # Remove NaN values + data = data.dropna() + + if len(data) < 3: + LOGGER.warning(f"Insufficient data for column {column} (need at least 3 samples)") + continue + + LOGGER.debug(f"Processing column: {column} with {len(data)} samples") + + # Create and fit Exponential Smoothing model + model = ExponentialSmoothing( + endog = data, + trend = "add", + seasonal = None # No seasonal component for this data + ) + + fit = model.fit() + + # Forecast next 3 values + forecast = fit.forecast(steps=3) + forecasted_values = forecast.tolist() + + # Calculate confidence score based on model fit quality + # Using residual standard error as inverse confidence metric + error = {} + residuals = fit.resid + mse = (residuals ** 2).mean() + rmse = mse ** 0.5 + + error['mse'] = float(mse) + error['rmse'] = float(rmse) + + # Normalize confidence: lower RMSE = higher confidence + # Use data scale (std dev) to normalize RMSE + data_std = data.std() + + if data_std > 0: + normalized_error = rmse / data_std + # Convert to confidence score (0-1 range, higher is better) + confidence = max(0, min(1, 1 - normalized_error)) + if confidence < 0.9: + confidence += 0.1 # Boost confidence for borderline cases + else: + confidence = 0.5 # Default if std dev is 0 + + LOGGER.info(f"Metric: {column}, RMSE: {rmse:.4f}, Data Std: {data_std:.4f}, Confidence: {confidence:.4f}") + LOGGER.info(f"Forecasted next 3 values for {column}: {forecasted_values}") + + results.append({ + "metric_name": column, + "forecasted_values": forecasted_values, + "confidence": float(confidence), + "sample_interval": 5, + "error_metrics": error, 
+            })
-            # Calculate confidence score based on model fit quality
-            # Using residual standard error as inverse confidence metric
-            residuals = fit.resid
-            mse = (residuals ** 2).mean()
-            rmse = mse ** 0.5
+            # Push results to DB via InfluxDB fetcher
+            if results and self._influx_fetcher:
+                self._influx_fetcher.write_predicted_telemetry(results)
-            # Normalize confidence: lower RMSE = higher confidence
-            # Use data scale (std dev) to normalize RMSE
-            data_std = data.std()
-            if data_std > 0:
-                normalized_error = rmse / data_std
-                # Convert to confidence score (0-1 range, higher is better)
-                confidence = max(0, min(1, 1 - normalized_error))
-            else:
-                confidence = 0.5  # Default if std dev is 0
-
-            # LOGGER.info(f"Model RMSE: {rmse:.4f}, Confidence: {confidence:.4f}")
-            LOGGER.info(f"Forecasted next 3 values: {forecasted_values}")
-
-            # return forecasted_values
-            return [confidence]
+
+            return results if results else None
 
         except Exception as e:
-            LOGGER.error(f"Error during forecasting: {e}")
+            LOGGER.error(f"Error during forecasting: {e}", exc_info=True)
             return None
 
-
     def process_data(
         self,
         performance_data: Dict[str, Any],
-        sla_policy: SLAPolicyConfig
     ) -> Dict[str, Any]:
         """
         Process device and performance data through AI models.
@@ -139,28 +181,23 @@ class AIModelProcessor:
         LOGGER.debug(f"Number of performance data points: {len(metric_values)}")
         # LOGGER.debug(f"Performance data values: {metric_values}")
 
-        # forecasted_values = self.ai_model_processor(metric_values)
-        # if forecasted_values is None:
-        #     LOGGER.warning("AI model processing failed or insufficient data")
-
-        # if forecasted_values:
-        #     # Exponential weights: more weight on earlier (starting) values
-        #     # Example: for 3 values -> weights = [0.5, 0.33, 0.17] (exponential decay)
-        #     weights = [2**(-i) for i in range(len(forecasted_values))]
-        #     # Normalize weights to sum to 1
-        #     total_weight = sum(weights)
-        #     weights = [w / total_weight for w in weights]
-        #     score = average(forecasted_values, weights=weights)
-        #     # LOGGER.debug(f"Weighted average with exponential weights: {score}, weights: {weights}")
-        # else:
-        #     score = None
-
-        score = self.ai_model_processor(metric_values)
+        if not metric_values:
+            LOGGER.warning("No performance data available for processing")
+            return {
+                'model_result': None,
+                'timestamp': datetime.now(timezone.utc).isoformat()
+            }
 
+        result = self.ai_model_processor(metric_values)
+
+        if result is None:
+            # fallback result structure
+            LOGGER.warning("AI model processing failed or insufficient data. See logs for details.")
+            return {
+                'model_result': None,
+                'timestamp': datetime.now(timezone.utc).isoformat()
+            }
         return {
-            'confidence_scores': score,
-            'summary': {
-                'sla_policy': sla_policy.to_dict(),
-                'timestamp': datetime.now(timezone.utc).isoformat()
-            }
+            'model_result': result,
+            'timestamp': datetime.now(timezone.utc).isoformat()
         }
diff --git a/src/tests/tools/simap_ai_engine/ai_engine/ai_model/sla_policy.py b/src/tests/tools/simap_ai_engine/ai_engine/ai_model/sla_policy.py
index ff957ad0a071dd8f50fa9dd08a157dda451f252c..116d6541ce0523b93239fb8624132e511fc1075a 100644
--- a/src/tests/tools/simap_ai_engine/ai_engine/ai_model/sla_policy.py
+++ b/src/tests/tools/simap_ai_engine/ai_engine/ai_model/sla_policy.py
@@ -33,11 +33,15 @@ class SLAPolicyConfig:
-        bandwidth_utilization_threshold_pct: Maximum acceptable bandwidth
+        bandwidth_utilization: Maximum acceptable bandwidth
             utilization as a percentage (0-100).
         time_window_seconds: Time window in seconds for data analysis.
+        forecast_sample_interval_sec: Sampling interval in seconds for data collection.
+ forecast_sample_count: Minimum number of samples to fetch from database. """ simap_id: str latency_threshold_ms: float|None - bandwidth_utilization_threshold_pct: float|None + bandwidth_utilization: float|None time_window_seconds: int + forecast_sample_interval_sec: int + forecast_sample_count: int @classmethod def from_dict(cls, data: Dict[str, Any]) -> SLAPolicyConfig: @@ -47,7 +51,7 @@ class SLAPolicyConfig: Args: data: Dictionary containing the SLA policy configuration fields. Required keys: 'simap_id', 'latency_threshold_ms', - 'bandwidth_utilization_threshold_pct', 'time_window_seconds'. + 'bandwidth_utilization', 'time_window_seconds'. Supports nested 'sla_metrics' structure. Returns: @@ -62,14 +66,18 @@ class SLAPolicyConfig: simap_id = str(data['simap_id']) metrics = data['sla_metrics'] latency_threshold_ms = float(metrics['latency_threshold_ms']) - bandwidth_threshold = float(metrics.get('bandwidth_utilization_threshold_pct', 0.0)) - time_window = int(data.get('window_size_sec', 300)) + bandwidth_threshold = float(metrics.get('bandwidth_utilization', 0.0)) + time_window = int(data['history_window_size_sec']) + sample_interval = int(data['forecast_sample_interval_sec']) + forecast_sample_count = int(data['forecast_sample_count']) return cls( - simap_id = simap_id, - latency_threshold_ms = latency_threshold_ms, - bandwidth_utilization_threshold_pct = bandwidth_threshold, - time_window_seconds = time_window + simap_id = simap_id, + latency_threshold_ms = latency_threshold_ms, + bandwidth_utilization = bandwidth_threshold, + time_window_seconds = time_window, + forecast_sample_interval_sec = sample_interval, + forecast_sample_count = forecast_sample_count ) except KeyError as e: raise KeyError(f"Missing required field: {e.args[0]}") from e diff --git a/src/tests/tools/simap_ai_engine/ai_engine/api/api_blueprint.py b/src/tests/tools/simap_ai_engine/ai_engine/api/api_blueprint.py index 3a2865dfa7808ba07b5b173dac831cd51fbbf666..7f71ad69aa9c1d39c81a86f357ef3be2fe0f3440 100644 --- a/src/tests/tools/simap_ai_engine/ai_engine/api/api_blueprint.py +++ b/src/tests/tools/simap_ai_engine/ai_engine/api/api_blueprint.py @@ -19,10 +19,13 @@ Defines the REST API endpoints for the AI Engine. """ import logging +import threading +import time from datetime import datetime, timezone from flask import Blueprint, jsonify, request +import requests from ..config import Config from ..ai_model.ai_processor import AIModelProcessor @@ -33,45 +36,117 @@ from ..ai_model.sla_policy import SLAPolicyConfig LOGGER = logging.getLogger(__name__) +# Background analysis state - track multiple analyses by simap_id +_analysis_threads = {} # {simap_id: {'thread': Thread, 'stop_event': Event}} +_threads_lock = threading.Lock() + +# OSM endpoint configuration +END_HOST = '10.0.58.25' +END_PORT = 8084 +BASE_URL = f'http://{END_HOST}:{END_PORT}/osm/aiAnalyticsEvent/v1' + def create_ai_engine_blueprint( simap_fetcher: SimapDataFetcher, influxdb_fetcher: InfluxDBFetcher, - ai_processor: AIModelProcessor, - decision_client: DecisionEngineClient + ai_processor: AIModelProcessor, + decision_client: DecisionEngineClient ) -> Blueprint: - """ - Create the Flask Blueprint for the AI Engine REST API. - - This function creates and configures a Flask Blueprint with all the - REST API endpoints for the AI Engine. - - Args: - simap_fetcher: Initialized SimapDataFetcher instance. - influxdb_fetcher: Initialized InfluxDBFetcher instance. - ai_processor: Initialized AIModelProcessor instance. 
- decision_client: Initialized DecisionEngineClient instance. - - Returns: - Configured Flask Blueprint with routes: - - POST /api/v1/analyze: Run SLA policy analysis - - GET /api/v1/health: Health check endpoint - - GET /api/v1/config: Get current configuration - """ - blueprint = Blueprint('ai_engine', __name__, url_prefix='/api/v1') + + blueprint = Blueprint('ai_analytics', __name__, url_prefix='/api/v1') + + def _background_analysis_task(sla_policy: SLAPolicyConfig, duration_minutes: int, stop_event: threading.Event): + """ + Background task that periodically analyzes data and posts results. + Args: + sla_policy: SLA policy configuration for analysis. + duration_minutes: How long to run the analysis (in minutes). + stop_event: Threading event to signal task termination. + """ + simap_id = sla_policy.simap_id + _response = {} + + try: + LOGGER.info(f"[{simap_id}] Starting background analysis for {duration_minutes} minutes") + start_time = time.time() + end_time = start_time + (duration_minutes * 60) + iteration = 0 + + while time.time() < end_time and not stop_event.is_set(): + iteration += 1 + iteration_start = time.time() + + try: + LOGGER.debug(f"[{simap_id}] Analysis iteration {iteration} - Fetching performance data") + + performance_data = influxdb_fetcher.fetch_performance_data(sla_policy) + + LOGGER.debug(f"[{simap_id}] Analysis iteration {iteration} - Processing with AI models") + results = ai_processor.process_data(performance_data) + + results['simap_id'] = simap_id + # results['iteration'] = iteration + + _response['data'] = results + _response['status'] = 'success' + _response['message'] = f'Analysis completed successfully' + + + LOGGER.debug(f"[{simap_id}] Analysis iteration {iteration} - Posting results to {BASE_URL}") + LOGGER.debug(f"[{simap_id}] Results payload: {_response}") + response = requests.post( + BASE_URL, + json = _response, + timeout = 10, + headers = {'Content-Type': 'application/json'} + ) + + if response.status_code in (200, 201, 202): + LOGGER.info(f"[{simap_id}] Iteration {iteration}: Results posted successfully (status {response.status_code})") + else: + LOGGER.warning(f"[{simap_id}] Iteration {iteration}: POST returned status {response.status_code}: {response.text}") + + except Exception as e: + LOGGER.error(f"[{simap_id}] Error in analysis iteration {iteration}: {e}") + + # Wait for 30 seconds (accounting for processing time per iteration) + elapsed = time.time() - iteration_start + sleep_time = max(0, 30 - elapsed) + + if sleep_time > 0 and time.time() + sleep_time < end_time and not stop_event.is_set(): + LOGGER.debug(f"[{simap_id}] Sleeping for {sleep_time:.1f} seconds until next iteration") + stop_event.wait(timeout=sleep_time) # Use wait instead of sleep for immediate response + elif time.time() < end_time: + # Not enough time for another full iteration cycle, exit gracefully + LOGGER.debug(f"[{simap_id}] Insufficient time remaining for next iteration, terminating") + break + + if stop_event.is_set(): + LOGGER.info(f"[{simap_id}] Background analysis stopped after {iteration} iterations") + else: + LOGGER.info(f"[{simap_id}] Background analysis completed after {iteration} iterations. 
Time limit reached.") + + except Exception as e: + LOGGER.exception(f"[{simap_id}] Fatal error in background analysis task: {e}") + + finally: + # Clean up thread tracking + with _threads_lock: + if simap_id in _analysis_threads: + del _analysis_threads[simap_id] + LOGGER.info(f"[{simap_id}] Background analysis task terminated") @blueprint.route('/analyze', methods=['POST']) def analyze(): """ - Run SLA policy analysis. + Start SLA policy analysis in background. - Expects JSON payload with SLA policy configuration. - Orchestrates the full analysis workflow: - fetch metrics from InfluxDB, process through AI models, and - send results to Decision Engine. + Expects JSON payload with SLA policy configuration including duration_minutes. + Validates input and immediately returns 202 Accepted. + Analysis runs in background, posting results every 30 seconds for the specified duration. Returns: - JSON response with analysis results or error message. + JSON response with acceptance confirmation or error message. """ LOGGER.info("Received analysis request") @@ -107,57 +182,209 @@ def create_ai_engine_blueprint( 'status': 'error', 'message': f'Invalid field value: {str(e)}' }), 400 - - # Execute analysis workflow + + # Extract duration from request try: - # Step 1: Fetch device data from SIMAP - # (At the moment, leaving it as it is. No more needed, to be removed in future) - # LOGGER.debug("Step 1: Fetching device data from SIMAP") - # device_data = simap_fetcher.fetch_device_data(sla_policy) - - # Step 2: Fetch performance data from InfluxDB - LOGGER.debug(">>> Step 2: Fetching performance data from InfluxDB") - performance_data = influxdb_fetcher.fetch_performance_data( - sla_policy + duration_minutes = int(data.get('duration_minutes', 0)) + if duration_minutes <= 0: + raise ValueError("duration_minutes must be positive") + except (TypeError, ValueError) as e: + LOGGER.error(f"Invalid duration_minutes: {e}") + return jsonify({ + 'status': 'error', + 'message': f'Invalid duration_minutes: {str(e)}' + }), 400 + + # Check if analysis is already running for this simap_id + with _threads_lock: + if sla_policy.simap_id in _analysis_threads: + thread_info = _analysis_threads[sla_policy.simap_id] + if thread_info['thread'].is_alive(): + LOGGER.warning(f"Analysis request rejected: analysis for SIMAP ID {sla_policy.simap_id} is already running") + return jsonify({ + 'status': 'error', + 'message': f'Analysis for SIMAP ID {sla_policy.simap_id} is already running. Stop it first.' + }), 409 # Conflict + + # Start background analysis task + try: + stop_event = threading.Event() + analysis_thread = threading.Thread( + target=_background_analysis_task, + args=(sla_policy, duration_minutes, stop_event), + daemon=True, + name=f"AI-Analysis-Thread-{sla_policy.simap_id}" ) + + # Register thread before starting + with _threads_lock: + _analysis_threads[sla_policy.simap_id] = { + 'thread': analysis_thread, + 'stop_event': stop_event, + 'start_time': datetime.now(timezone.utc).isoformat(), + 'duration_minutes': duration_minutes + } + + analysis_thread.start() + + LOGGER.info(f"Background analysis started for SIMAP ID {sla_policy.simap_id}, duration {duration_minutes} minutes") + + # Return immediate confirmation + return jsonify({ + 'status': 'accepted', + 'message': f'Analysis started successfully. 
Results will be posted every 30 seconds for {duration_minutes} minutes.', + 'simap_id': sla_policy.simap_id, + 'duration_minutes': duration_minutes, + 'endpoint': BASE_URL + }), 202 # Accepted + + except Exception as e: + # Clean up on failure + with _threads_lock: + if sla_policy.simap_id in _analysis_threads: + del _analysis_threads[sla_policy.simap_id] + LOGGER.exception(f"Failed to start background analysis: {e}") + return jsonify({ + 'status': 'error', + 'message': f'Failed to start analysis: {str(e)}' + }), 500 - # >>> Step 3: Process data through AI models - LOGGER.debug(">>> Step 3: Processing data through AI models") - results = ai_processor.process_data( - performance_data, sla_policy - ) + @blueprint.route('/analyze/stop', methods=['POST']) + def stop_analyze(): + """ + Stop running analysis for a specific SIMAP ID. - # >>> Step 4: Send results to Decision Engine - LOGGER.debug(">>> Step 4: Sending results to Decision Engine") - if not decision_client.send_results(results): - LOGGER.error("Failed to send results to Decision Engine") + Expects JSON payload with simap_id. + Signals the background thread to stop gracefully. + + Returns: + JSON response with stop confirmation or error message. + """ + LOGGER.info("Received stop analysis request") + + # Parse and validate request JSON + try: + data = request.get_json() + if data is None: + LOGGER.error("Request body is empty or not valid JSON") return jsonify({ 'status': 'error', - 'message': 'Failed to send results to Decision Engine' - }), 500 + 'message': 'Request body must be valid JSON' + }), 400 + except Exception as e: + LOGGER.error(f"Failed to parse request JSON: {e}") + return jsonify({ + 'status': 'error', + 'message': f'Invalid JSON: {str(e)}' + }), 400 - LOGGER.info("Analysis completed successfully") + # Extract simap_id + simap_id = data.get('simap_id') + if not simap_id: + LOGGER.error("Missing simap_id in request") return jsonify({ - 'status': 'success', - 'data': results, - 'message': 'Analysis completed successfully' - }), 200 + 'status': 'error', + 'message': 'Missing required field: simap_id' + }), 400 - except Exception as e: - # Check if this is a retry failure (service unavailable) - error_msg = str(e) - if 'Giving up' in error_msg or 'unavailable' in error_msg.lower(): - LOGGER.error(f"External service unavailable: {e}") + # Find and stop the thread + with _threads_lock: + if simap_id not in _analysis_threads: + LOGGER.warning(f"No running analysis found for SIMAP ID {simap_id}") return jsonify({ 'status': 'error', - 'message': f'External service unavailable: {error_msg}' - }), 503 - else: - LOGGER.exception(f"Unexpected error during analysis: {e}") + 'message': f'No running analysis found for SIMAP ID {simap_id}' + }), 404 + + thread_info = _analysis_threads[simap_id] + if not thread_info['thread'].is_alive(): + # Clean up dead thread + del _analysis_threads[simap_id] + LOGGER.warning(f"Analysis thread for SIMAP ID {simap_id} is not alive") return jsonify({ 'status': 'error', - 'message': f'Internal server error: {error_msg}' - }), 500 + 'message': f'Analysis for SIMAP ID {simap_id} is not running' + }), 404 + + # Signal thread to stop + thread_info['stop_event'].set() + LOGGER.info(f"Stop signal sent to analysis thread for SIMAP ID {simap_id}") + + return jsonify({ + 'status': 'success', + 'message': f'Stop signal sent to analysis for SIMAP ID {simap_id}', + 'simap_id': simap_id + }), 200 + + @blueprint.route('/analyze/stop-all', methods=['POST']) + def stop_all_analyses(): + """ + Stop all running analyses. 
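+        (e.g., to clean up when analyses for several SIMAP IDs are active).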
+ + Signals all background threads to stop gracefully. + + Returns: + JSON response with summary of stopped analyses. + """ + LOGGER.info("Received stop all analyses request") + + stopped_ids = [] + skipped_ids = [] + + with _threads_lock: + if not _analysis_threads: + LOGGER.info("No running analyses to stop") + return jsonify({ + 'status': 'success', + 'message': 'No running analyses to stop', + 'stopped_count': 0, + 'stopped_ids': [] + }), 200 + + # Signal all threads to stop + for simap_id, thread_info in list(_analysis_threads.items()): + if thread_info['thread'].is_alive(): + thread_info['stop_event'].set() + stopped_ids.append(simap_id) + LOGGER.info(f"Stop signal sent to analysis thread for SIMAP ID {simap_id}") + else: + skipped_ids.append(simap_id) + LOGGER.warning(f"Analysis thread for SIMAP ID {simap_id} is not alive, skipping") + + return jsonify({ + 'status': 'success', + 'message': f'Stop signal sent to {len(stopped_ids)} running analyses', + 'stopped_count': len(stopped_ids), + }), 200 + + @blueprint.route('/status', methods=['GET']) + def status(): + """ + Get status of all running analyses. + + Returns: + JSON response with list of running analyses. + """ + LOGGER.debug("Status check requested") + + with _threads_lock: + running_analyses = [ + { + 'simap_id': simap_id, + 'is_alive': info['thread'].is_alive(), + 'start_time': info['start_time'], + 'duration_minutes': info['duration_minutes'] + } + for simap_id, info in _analysis_threads.items() + ] + + return jsonify({ + 'running_count': len(running_analyses), + 'analyses': running_analyses, + 'timestamp': datetime.now(timezone.utc).isoformat() + }), 200 + + @blueprint.route('/health', methods=['GET']) def health(): @@ -181,20 +408,28 @@ def create_ai_engine_blueprint( Returns: JSON response with SIMAP and InfluxDB connection details. + Sensitive values (passwords, tokens) are masked. """ LOGGER.debug("Configuration requested") + + def mask_secret(value: str) -> str: + """Mask sensitive values for display.""" + if not value: + return '(not set)' + return f"{value[:2]}{'*' * (len(value) - 2)}" if len(value) > 2 else '***' + return jsonify({ 'simap': { 'scheme': Config.SIMAP_DATASTORE_SCHEME, 'address': Config.SIMAP_DATASTORE_ADDRESS, 'port': Config.SIMAP_DATASTORE_PORT, 'username': Config.SIMAP_DATASTORE_USERNAME, - 'password': Config.SIMAP_DATASTORE_PASSWORD + 'password': mask_secret(Config.SIMAP_DATASTORE_PASSWORD) }, 'influxdb': { 'host': Config.INFLUXDB_HOST, 'port': Config.INFLUXDB_PORT, - 'token': Config.INFLUXDB_TOKEN, + 'token': mask_secret(Config.INFLUXDB_TOKEN), 'database': Config.INFLUXDB_DATABASE }, 'api': { diff --git a/src/tests/tools/simap_ai_engine/ai_engine/clients/influxdb_fetcher.py b/src/tests/tools/simap_ai_engine/ai_engine/clients/influxdb_fetcher.py index d989011dcb24f01f2af3192fd13c467e4da9e0ae..11908b4617e2710417579e758dd08c7679ed4eaa 100644 --- a/src/tests/tools/simap_ai_engine/ai_engine/clients/influxdb_fetcher.py +++ b/src/tests/tools/simap_ai_engine/ai_engine/clients/influxdb_fetcher.py @@ -24,8 +24,9 @@ import logging from datetime import datetime, timezone from typing import Any, Dict +from numpy import average from common.tools.client.RetryDecorator import delay_exponential, retry -from influxdb_client_3 import InfluxDBClient3 +from influxdb_client_3 import InfluxDBClient3, Point, WritePrecision from ..ai_model.sla_policy import SLAPolicyConfig LOGGER = logging.getLogger(__name__) @@ -61,23 +62,19 @@ class InfluxDBFetcher: influxdb_token: Authentication token for InfluxDB. 
influxdb_database: Name of the InfluxDB database to query. """ - self.influxdb_host = influxdb_host - self.influxdb_port = influxdb_port - self.influxdb_token = influxdb_token + self.influxdb_host = influxdb_host + self.influxdb_port = influxdb_port + self.influxdb_token = influxdb_token self.influxdb_database = influxdb_database + self.influxdb_url = f"http://{influxdb_host}:{influxdb_port}" - # Construct full URL with port for InfluxDB v3 - self.influxdb_url = f"http://{influxdb_host}:{influxdb_port}" - - LOGGER.info( - f"InfluxDBFetcher initialized for database '{influxdb_database}' " - f"at {self.influxdb_url}" - ) + LOGGER.info( f"InfluxDBFetcher initialized for database '{influxdb_database}' " + f"at {self.influxdb_url}") self._client = InfluxDBClient3( - host=self.influxdb_url, - token=self.influxdb_token, - database=self.influxdb_database - ) + host = self.influxdb_url, + token = self.influxdb_token, + database = self.influxdb_database + ) def is_connected(self) -> bool: """ @@ -100,7 +97,6 @@ class InfluxDBFetcher: def process_response_table( self, table: Any, - metric_to_process: str ) -> Dict[str, Any]: """ Process InfluxDB response table into structured data. @@ -111,45 +107,42 @@ class InfluxDBFetcher: Returns: Dictionary containing processed performance metrics and values. """ - metrics = [] - if table is not None and isinstance(table, pd.DataFrame): - metrics = table.to_dict('records') - else: - LOGGER.warning("No data returned from InfluxDB query ") - - keys_to_check = [ 'time', 'link_id'] - sla_metric = 'bandwidth_utilization' - keys_to_check.append(sla_metric) - - # if metric_to_process == 'latency_threshold_ms': - # sla_metric = 'latency' - # keys_to_check.append(sla_metric) - # elif metric_to_process == 'bandwidth_utilization_threshold_pct': - # sla_metric = 'bandwidth_utilization' - # keys_to_check.append(sla_metric) - # else: - # sla_metric = None - # LOGGER.warning(f"Unknown metric to process: {metric_to_process}") - + if table is None or not isinstance(table, pd.DataFrame): + LOGGER.warning("No data returned from InfluxDB query") + return { + 'metrics': [], + 'metric_values': [] + } + + LOGGER.debug(f"Processing {len(table)} rows from InfluxDB response") - LOGGER.debug(f"Processed {len(metrics)} metric records from InfluxDB response") - data = [] - metric_value = [] - if sla_metric is not None: - for row in metrics: - # LOGGER.debug(f"Metric record: {row}") - new_row = {} - for key, value in row.items(): - if key in keys_to_check: - new_row[key] = value - if key == sla_metric: - metric_value.append(value) - data.append(new_row) - LOGGER.debug(f">>> Processed metric values: {metric_value}") + # Sort by time column (old to new) if it exists + if 'time' in table.columns: + table = table.sort_values(by='time', ascending=True) + LOGGER.debug("Sorted data by time (ascending: old to new)") + + # Define columns for each output dataframe + full_columns = ['bandwidth_utilization', 'latency', 'time', 'link_id'] + metric_columns = ['bandwidth_utilization', 'latency'] + + # Create DataFrame 1: Full metrics with time and link_id + # Filter only columns that exist in the table + available_full_cols = [col for col in full_columns if col in table.columns] + df_full = table[available_full_cols] + metrics = df_full.to_dict('records') + + # Create DataFrame 2: Only metric values (bandwidth_utilization, latency) + available_metric_cols = [col for col in metric_columns if col in table.columns] + df_metrics = table[available_metric_cols] + metric_values = df_metrics.to_dict('records') + + 
LOGGER.debug(f"Processed {len(metrics)} metric records with {len(available_full_cols)} columns") + LOGGER.debug(f"Extracted {len(metric_values)} metric value records with {len(available_metric_cols)} columns") + LOGGER.info(f"Response values are: {metric_values} ") return { - 'metrics': data, - 'metric_values': metric_value + 'metrics': metrics, + 'metric_values': metric_values } @@ -158,77 +151,86 @@ class InfluxDBFetcher: self, sla_policy: SLAPolicyConfig ) -> Dict[str, Any]: - """ - Fetch performance metrics from InfluxDB. - - Queries InfluxDB for time-series performance data based on the - SLA policy parameters and device information. The retry decorator - ensures resilience against transient failures. - - Args: - sla_policy: The SLA policy configuration containing time window - and threshold parameters. - - Returns: - Dictionary containing: - - 'metrics': List of performance metric records. - - 'timestamp_range': Dictionary with 'start' and 'end' - timestamps for the queried data. - - Raises: - Exception: If InfluxDB is unavailable after all retries, - or if the query fails. - """ if not self.is_connected(): raise ConnectionError("Unable to connect to InfluxDB") if sla_policy.latency_threshold_ms is None: raise ValueError("SLA policy missing latency threshold for data fetch") - - metric_to_process = sla_policy.latency_threshold_ms LOGGER.debug( f"Fetching performance data for simap_id={sla_policy.simap_id}, " - f"time_window={sla_policy.time_window_seconds}s " - f"for metric={metric_to_process} " + f"time_window={sla_policy.time_window_seconds}s, " + f"required_samples={sla_policy.forecast_sample_count}" ) try: - query = ( - f"SELECT * FROM link_telemetry " - f"WHERE link_id = '{sla_policy.simap_id}' " - f"AND time >= now() - INTERVAL '{sla_policy.time_window_seconds} seconds' " - f"ORDER BY time DESC" - ) - - LOGGER.debug(f"Executing query: {query}") - - table = self._client.query(query=query, language="sql", mode="pandas") - - result = self.process_response_table(table, metric_to_process) - # metrics = result.get('metrics', []) + # Initial time window + current_time_window = sla_policy.time_window_seconds + max_attempts = 3 + attempt = 1 + final_table = None - # start_time = datetime.now(timezone.utc) - # end_time = datetime.now(timezone.utc) - # if metrics: - # times = [m.get('time') for m in metrics if m.get('time')] - # if times is not None: - # start_time = min(times) - # end_time = max(times) + while attempt <= max_attempts: + query = ( + f"SELECT * FROM link_telemetry " + f"WHERE link_id = '{sla_policy.simap_id}' " + f"AND time >= now() - INTERVAL '{current_time_window} seconds' " + f"ORDER BY time DESC" + ) + + LOGGER.debug(f"Attempt {attempt}/{max_attempts}: Executing query with time_window={current_time_window}s") + LOGGER.debug(f"Query: {query}") + + final_table = self._client.query(query=query, language="sql", mode="pandas") + + # Count samples from raw table + samples_fetched = 0 if final_table is None or not isinstance(final_table, pd.DataFrame) else len(final_table) + + LOGGER.info( + f"Attempt {attempt}: Fetched {samples_fetched} samples " + f"(required: {sla_policy.forecast_sample_count})" + ) + + # Check if we have enough samples + if samples_fetched >= sla_policy.forecast_sample_count: + LOGGER.info(f"Required samples met") + break + + # If not enough samples and not last attempt, calculate new time window + if attempt < max_attempts: + if samples_fetched > 0: + # Calculate required time window based on sample density + # Formula: new_window = current_window * 
(required_samples / fetched_samples) * 1.2 + # The 1.2 factor adds 20% buffer to account for non-uniform data distribution + ratio = sla_policy.forecast_sample_count / samples_fetched + current_time_window = int(current_time_window * ratio * 1.2) + LOGGER.debug(f"Extending time window to {current_time_window}s(ratio: {ratio:.2f})") + else: + # If no samples, double the time window + current_time_window *= 2 + LOGGER.warning(f"No samples found, doubling time window to {current_time_window}s") + + attempt += 1 + else: + LOGGER.warning( + f"Max attempts reached. Returning {samples_fetched} samples " + f"(required: {sla_policy.forecast_sample_count})" + ) + break - LOGGER.info(f"Fetched {len(result.get('metrics', []))} metric records for simap_id={sla_policy.simap_id}") + # Process the response table after fetch is completed + result = self.process_response_table(final_table) return { - 'metrics': result.get('metrics', []), - 'metric_values': result.get('metric_values', []), - 'timestamp_range': { - # 'start': start_time.isoformat() if isinstance(start_time, datetime) else str(start_time), - # 'end': end_time.isoformat() if isinstance(end_time, datetime) else str(end_time) - } + 'metrics': result.get('metrics', []), + 'metric_values': result.get('metric_values', []), + 'fetch_window_size_sec': current_time_window, + 'timestamp_range': {}, } - finally: - self._client.close() + except Exception as e: + LOGGER.error(f"Error fetching performance data from InfluxDB: {e}", exc_info=True) + raise e @RETRY_DECORATOR def notify_telemetry_update( @@ -255,7 +257,7 @@ class InfluxDBFetcher: Exception: If InfluxDB is unavailable after all retries. """ status = notification_data.get('status') - timestamp = notification_data.get('timestamp', 'N/A') + timestamp = notification_data.get('timestamp', datetime.now(timezone.utc).isoformat()) # Validate status value if status not in {'UPGRADE', 'DOWNGRADE'}: @@ -268,18 +270,82 @@ class InfluxDBFetcher: f"status={status}, timestamp={timestamp}" ) - # TODO: Implement actual InfluxDB write - # Example implementation: - # from influxdb_client_3 import InfluxDBClient3, Point - # client = InfluxDBClient3( - # host=self.influxdb_host, - # token=self.influxdb_token, - # database=self.influxdb_database - # ) - # point = Point("telemetry_notifications") \ - # .tag("status", status) \ - # .field("timestamp", timestamp) - # client.write(point) + point = Point("telemetry_notifications") \ + .tag("status", status) \ + .field("timestamp", timestamp) + self._client.write(point) LOGGER.info("Telemetry notification stored successfully in InfluxDB") return True + + def write_predicted_telemetry( + self, + results: list[dict[str, Any]], + network_id: str = 'e2e', + link_id: str = 'E2E-L1' + ) -> bool: + """ + Write predicted telemetry (forecasted metrics) to InfluxDB. + + Args: + results: List of forecast results from AIModelProcessor. + Each dict contains metric_name, forecasted_values, etc. + network_id: Network identifier (default: 'e2e') + link_id: Link identifier (default: 'E2E-L1') + + Returns: + True if write succeeded, False otherwise. 
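+
+        Note: writes a single 'predicted_telemetry' point whose pred_* fields
+        hold the average of each metric's forecasted values.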
+ """ + if not self.is_connected(): + LOGGER.warning("InfluxDB client not initialized, skipping write to DB") + return False + + if not results: + LOGGER.warning("No results to write to DB") + return False + + try: + # Extract metric predictions and calculate averages + metric_averages = {} + for result in results: + metric_name = result.get("metric_name") + forecasted_values = result.get("forecasted_values", []) + + if metric_name and forecasted_values: + # Calculate average of forecasted values + avg_value = float(average(forecasted_values)) + metric_averages[metric_name] = avg_value + LOGGER.debug(f"Average forecast for {metric_name}: {avg_value:.4f}") + + # Create InfluxDB point for predicted telemetry + point = ( + Point("predicted_telemetry") + .tag("network_id", network_id) + .tag("link_id", link_id) + ) + + # Add predicted metric fields with pred_ prefix + if "bandwidth_utilization" in metric_averages: + point = point.field( + "pred_bandwidth_utilization", + metric_averages["bandwidth_utilization"] + ) + + if "latency" in metric_averages: + point = point.field( + "pred_latency", + metric_averages["latency"] + ) + + # Write to InfluxDB + self._client.write(record=point, write_precision=WritePrecision.S) + + LOGGER.info( + "Wrote predicted telemetry to InfluxDB: network=%s, link=%s, metrics=%s", + network_id, link_id, list(metric_averages.keys()) + ) + return True + + except Exception as e: + LOGGER.error(f"Failed to write predicted telemetry to InfluxDB: {e}", exc_info=True) + return False diff --git a/src/tests/tools/simap_ai_engine/ai_engine/engine.py b/src/tests/tools/simap_ai_engine/ai_engine/engine.py index 17e9db74b5bc49e889ae39ec8a067fe9496a4797..547666ed55283f462bac9880fb72a6dddb673357 100644 --- a/src/tests/tools/simap_ai_engine/ai_engine/engine.py +++ b/src/tests/tools/simap_ai_engine/ai_engine/engine.py @@ -65,7 +65,10 @@ class AIEngineAPI: influxdb_database = Config.INFLUXDB_DATABASE ) - self.ai_processor = AIModelProcessor() + # Pass InfluxDB fetcher to AI processor for writing predicted telemetry + self.ai_processor = AIModelProcessor( + influx_fetcher=self.influxdb_fetcher + ) self.decision_client = DecisionEngineClient() # Create Flask application diff --git a/src/tests/tools/simap_ai_engine/ai_engine/requirements.in b/src/tests/tools/simap_ai_engine/ai_engine/requirements.in index a7a19a0120fbdddbcbaeb0e19ef6f459ddb06225..d7cf39502a46ad3ff29a7d9b52666e5e0b55a59f 100644 --- a/src/tests/tools/simap_ai_engine/ai_engine/requirements.in +++ b/src/tests/tools/simap_ai_engine/ai_engine/requirements.in @@ -16,3 +16,6 @@ flask>=2.3.0 requests>=2.31.0 influxdb3-python>=0.8.0 +pandas>=2.0.0 +statsmodels>=0.14.0 +numpy>=1.24.0 diff --git a/src/tests/tools/simap_ai_engine/ai_engine/tests/run_test.sh b/src/tests/tools/simap_ai_engine/ai_engine/tests/run_test.sh index 372474ddb6b25c4e0beb752e963aaf9f3479d1ce..191bff28a2fe50912e5ac1a638829835d45fbd79 100755 --- a/src/tests/tools/simap_ai_engine/ai_engine/tests/run_test.sh +++ b/src/tests/tools/simap_ai_engine/ai_engine/tests/run_test.sh @@ -18,23 +18,28 @@ # Usage: ./run_test.sh # Navigate to TFS root directory -cd "$(dirname "$0")/../../../../.." 
+cd "$(dirname "$0")" # Set Python path to include TFS src and AI Analytics Engine export PYTHONPATH="${PWD}/src:${PWD}/src/tests/tools/simap_ai_engine" # Activate virtual environment if not already activated -if [ -z "$VIRTUAL_ENV" ]; then - if [ -d "$HOME/.env-simap" ]; then - source "$HOME/.env-simap/bin/activate" - fi -fi +# if [ -z "$VIRTUAL_ENV" ]; then +# if [ -d "$HOME/.env-simap" ]; then +# source "$HOME/.env-simap/bin/activate" +# fi +# fi +echo "$PWD" +echo "Running AI Analytics Engine API tests..." # Define log file path -LOG_FILE="${PWD}/src/tests/tools/simap_ai_engine/ai_engine/tests/test_api.log" +LOG_FILE="${PWD}/test_api_docker.log" +TEST_FILE="${PWD}/test_api_docker.py" # Run the test with logging enabled and capture output -pytest src/tests/tools/simap_ai_engine/ai_engine/tests/test_api.py::test_analyze_endpoint \ + +pytest $TEST_FILE::test_stop_all_analyses_endpoint \ +# pytest $TEST_FILE::test_analyze_endpoint \ -v -s \ --log-cli-level=DEBUG \ --log-file="${LOG_FILE}" \ diff --git a/src/tests/tools/simap_ai_engine/ai_engine/tests/test_api.py b/src/tests/tools/simap_ai_engine/ai_engine/tests/test_api.py index 72bb1f0b247f124e59d70ebee11939cfdb7f8d70..a572702b30118d31a16e0bc80ea0f12e4664fc05 100644 --- a/src/tests/tools/simap_ai_engine/ai_engine/tests/test_api.py +++ b/src/tests/tools/simap_ai_engine/ai_engine/tests/test_api.py @@ -19,7 +19,6 @@ This module tests the /api/v1/analyze endpoint by starting the server and sending HTTP requests. """ -from csv import Error import logging import os import sys @@ -116,21 +115,20 @@ def test_analyze_endpoint(ai_engine_server): - Returns JSON response with status and message fields """ - if ai_engine_server is Error: - pytest.fail("AI Analytics Engine server failed to start") - LOGGER.info(">>>>>> Starting test_case test_analyze_endpoint: POST /api/v1/analyze endpoint") # Prepare test payload with SLA policy configuration payload = { "simap_id": "E2E-L1", "sla_metrics": { - "latency_threshold_ms": 10, - "bandwidth_utilization_threshold_pct": 0.0 + "latency_threshold_ms": 0, + "bandwidth_utilization": 0.0 }, - "window_size_sec": 600 + "history_window_size_sec": 600, + "forecast_sample_interval_sec": 5, + "forecast_sample_count": 120, } - + LOGGER.info(f"Sending analyze request with payload: {payload}") # Send POST request to analyze endpoint diff --git a/src/tests/tools/simap_ai_engine/ai_engine/tests/test_api_docker.py b/src/tests/tools/simap_ai_engine/ai_engine/tests/test_api_docker.py new file mode 100644 index 0000000000000000000000000000000000000000..94ec851bb9d47300d6f584304ae074e1dc8c4447 --- /dev/null +++ b/src/tests/tools/simap_ai_engine/ai_engine/tests/test_api_docker.py @@ -0,0 +1,333 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Test suite for AI Analytics Engine REST API running in Docker. + +This module tests the /api/v1/analyze endpoint by connecting to the +AI-Engine Docker container exposed on port 8084. 
+""" + +import logging +import time + +import pytest +import requests + +# Configure logging for tests +logging.basicConfig( + level=logging.DEBUG, + format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s" +) +LOGGER = logging.getLogger(__name__) + +# Test server configuration - Docker container exposed port +TEST_HOST = '127.0.0.1' +TEST_PORT = 8084 # Docker container port mapping: 8084->8080 + +BASE_URL = f'http://{TEST_HOST}:{TEST_PORT}' + + + +@pytest.fixture(scope='module') +def ai_engine_server_connection_confirmation(): + """ + Fixture to verify the AI Analytics Engine Docker container is running. + + Checks connectivity to the Docker container and yields control to tests. + Assumes the container is already running (docker run -p 8084:8080 ai-engine:latest). + """ + LOGGER.info("Checking AI Analytics Engine Docker container availability") + + # Wait for server to be ready + max_retries = 15 + for i in range(max_retries): + try: + LOGGER.debug(f"Checking Docker container connectivity... ({i+1}/{max_retries})") + response = requests.get(f'{BASE_URL}/api/v1/config', timeout=2) + if response.status_code == 200: + LOGGER.info("AI Analytics Engine Docker container is ready") + break + except requests.exceptions.RequestException as e: + LOGGER.debug(f"Container not ready yet: {e}") + if i < max_retries - 1: + time.sleep(2) + else: + raise RuntimeError( + f"Failed to connect to AI Analytics Engine Docker container at {BASE_URL}. " + f"Ensure container is running: docker run -p 8084:8080 ai-engine:latest" + ) + + yield + + LOGGER.info("AI Analytics Engine Docker test fixture cleanup complete") + + + +def test_analyze_endpoint(ai_engine_server_connection_confirmation): + """ + Test POST /api/v1/analyze endpoint. + + Validates that the analyze endpoint: + - Accepts valid SLA policy JSON payload + - Returns 202 Accepted for background processing + - Returns JSON response with status, message, simap_id, duration, and endpoint fields + """ + + LOGGER.info(">>>>>> Starting test_case test_analyze_endpoint: POST /api/v1/analyze endpoint") + + # Prepare test payload with SLA policy configuration + payload = { + "simap_id": "E2E-L1", + "sla_metrics": { + "latency_threshold_ms": 0, + "bandwidth_utilization": 0.0 + }, + "history_window_size_sec": 60, + "forecast_sample_interval_sec": 30, + "forecast_sample_count": 50, + "duration_minutes": 10 # Short duration for testing + } + + LOGGER.info(f"Sending analyze request with payload: {payload}") + + # Send POST request to analyze endpoint + response = requests.post( + f'{BASE_URL}/api/v1/analyze', + json=payload, + timeout=10 + ) + + LOGGER.info(f"Analyze response status: {response.status_code}") + + # Parse JSON response + data = response.json() + LOGGER.info(f"Analyze response body: {data}") + + # Validate response structure + assert 'status' in data, "Response missing 'status' field" + assert 'message' in data, "Response missing 'message' field" + + # Accept either accepted (202) or service unavailable (503) + # 503 is expected if SIMAP server or InfluxDB are not running + if response.status_code == 202: + LOGGER.info("Analysis started successfully") + assert data['status'] == 'accepted', f"Expected status 'accepted', got '{data['status']}'" + assert data['simap_id'] == 'E2E-L1', f"Expected simap_id 'E2E-L1', got '{data['simap_id']}'" + assert data['duration_minutes'] == 2, f"Expected duration_minutes 2, got '{data['duration_minutes']}'" + assert '/osm/aiAnalyticsEvent/v1' in data['endpoint'], f"Expected '/osm/aiAnalyticsEvent/v1' in endpoint" + elif 
+
+
+def test_status_endpoint(ai_engine_server_connection_confirmation):
+    """
+    Test GET /api/v1/status endpoint.
+
+    Validates that the status endpoint:
+    - Returns list of running analyses
+    - Includes running_count, analyses array, and timestamp
+    - Each analysis has simap_id, is_alive, start_time, duration_minutes
+    """
+
+    LOGGER.info(">>>>>> Starting test_case test_status_endpoint: GET /api/v1/status endpoint")
+
+    # Send GET request to status endpoint
+    response = requests.get(
+        f'{BASE_URL}/api/v1/status',
+        timeout=5
+    )
+
+    LOGGER.info(f"Status response status: {response.status_code}")
+    assert response.status_code == 200, f"Expected status code 200, got {response.status_code}"
+
+    # Parse JSON response
+    data = response.json()
+    LOGGER.info(f"Status response body: {data}")
+
+    # Validate response structure
+    assert 'running_count' in data, "Response missing 'running_count' field"
+    assert 'analyses' in data, "Response missing 'analyses' field"
+    assert 'timestamp' in data, "Response missing 'timestamp' field"
+    assert isinstance(data['analyses'], list), "Field 'analyses' should be a list"
+
+    # If there are running analyses, validate their structure
+    if data['running_count'] > 0:
+        LOGGER.info(f"Found {data['running_count']} running analyses")
+        for analysis in data['analyses']:
+            assert 'simap_id' in analysis, "Analysis missing 'simap_id' field"
+            assert 'is_alive' in analysis, "Analysis missing 'is_alive' field"
+            assert 'start_time' in analysis, "Analysis missing 'start_time' field"
+            assert 'duration_minutes' in analysis, "Analysis missing 'duration_minutes' field"
+    else:
+        LOGGER.info("No analyses currently running")
+
+    LOGGER.info("Status endpoint test passed!")
+    LOGGER.info("<<<<<< Finished test_case test_status_endpoint")
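Similarly, the status assertions imply a response of roughly this shape (timestamp formats are illustrative, not asserted):

```python
# Illustrative /api/v1/status response body consistent with the assertions above.
status_body = {
    "running_count": 1,
    "analyses": [
        {
            "simap_id": "E2E-L1",
            "is_alive": True,
            "start_time": "2025-01-01T00:00:00+00:00",
            "duration_minutes": 10,
        }
    ],
    "timestamp": "2025-01-01T00:05:00+00:00",
}
```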
+
+
+def test_stop_analyze_endpoint(ai_engine_server_connection_confirmation):
+    """
+    Test POST /api/v1/analyze/stop endpoint.
+
+    Validates that the stop endpoint:
+    - Stops a running analysis by simap_id
+    - Returns 404 if no analysis found
+    - Returns 200 on successful stop
+    """
+
+    LOGGER.info(">>>>>> Starting test_case test_stop_analyze_endpoint: POST /api/v1/analyze/stop endpoint")
+
+    # First, start an analysis to test stopping it
+    start_payload = {
+        "simap_id": "L2",
+        "sla_metrics": {
+            "latency_threshold_ms": 0,
+            "bandwidth_utilization": 0.0
+        },
+        "history_window_size_sec": 60,
+        "forecast_sample_interval_sec": 5,
+        "forecast_sample_count": 50,
+        "duration_minutes": 5  # Longer duration so we can stop it
+    }
+
+    LOGGER.info(f"Starting analysis with payload: {start_payload}")
+    start_response = requests.post(
+        f'{BASE_URL}/api/v1/analyze',
+        json=start_payload,
+        timeout=10
+    )
+
+    # Only proceed with stop test if start was successful
+    if start_response.status_code == 202:
+        LOGGER.info("Analysis started, now testing stop endpoint")
+
+        # Wait a moment to ensure thread is running
+        time.sleep(2)
+
+        # Test stopping the analysis
+        stop_payload = {"simap_id": start_payload["simap_id"]}
+
+        LOGGER.info(f"Sending stop request with payload: {stop_payload}")
+        response = requests.post(
+            f'{BASE_URL}/api/v1/analyze/stop',
+            json=stop_payload,
+            timeout=10
+        )
+
+        LOGGER.info(f"Stop response status: {response.status_code}")
+
+        # Parse JSON response
+        data = response.json()
+        LOGGER.info(f"Stop response body: {data}")
+
+        assert response.status_code == 200, f"Expected status code 200, got {response.status_code}"
+        assert data['status'] == 'success', f"Expected status 'success', got '{data['status']}'"
+        assert data['simap_id'] == start_payload["simap_id"], f"Expected simap_id '{start_payload['simap_id']}', got '{data['simap_id']}'"
+
+        LOGGER.info("Stop successful, verifying analysis is stopped")
+
+        # Verify the analysis is no longer running
+        time.sleep(1)
+        status_response = requests.get(f'{BASE_URL}/api/v1/status', timeout=5)
+        status_data = status_response.json()
+
+        # Check if L2 is still in the list of running analyses
+        running_ids = [a['simap_id'] for a in status_data['analyses'] if a['is_alive']]
+        assert 'L2' not in running_ids, "Analysis should be stopped"
+
+        LOGGER.info("Verified analysis was stopped")
+    else:
+        LOGGER.warning(f"Skipping stop test - could not start analysis (status {start_response.status_code})")
+        pytest.skip("Could not start analysis to test stop functionality")
+
+    # Test stopping non-existent analysis
+    LOGGER.info("Testing stop on non-existent analysis")
+    stop_nonexistent = {"simap_id": "nonexistent-id"}
+    response = requests.post(
+        f'{BASE_URL}/api/v1/analyze/stop',
+        json=stop_nonexistent,
+        timeout=10
+    )
+
+    LOGGER.info(f"Stop nonexistent response status: {response.status_code}")
+    data = response.json()
+    LOGGER.info(f"Stop nonexistent response body: {data}")
+
+    assert response.status_code == 404, f"Expected status code 404 for nonexistent, got {response.status_code}"
+    assert data['status'] == 'error', f"Expected status 'error', got '{data['status']}'"
+
+    LOGGER.info("Stop endpoint test passed!")
+    LOGGER.info("<<<<<< Finished test_case test_stop_analyze_endpoint")
+
+
+def test_stop_all_analyses_endpoint(ai_engine_server_connection_confirmation):
+    """
+    Test POST /api/v1/analyze/stop-all endpoint.
+ + Validates that the stop-all endpoint: + - Stops all running analyses + - Returns summary with stopped_count and stopped_ids + - Handles case when no analyses are running + """ + + LOGGER.info(">>>>>> Starting test_case test_stop_all_analyses_endpoint: POST /api/v1/analyze/stop-all endpoint") + started_ids = ["E2E-L1"] + + + # Only proceed if at least one analysis started + if len(started_ids) > 0: + LOGGER.info(f"Started {len(started_ids)} analyses, now testing stop-all endpoint") + + # Call stop-all endpoint + LOGGER.info("Sending stop-all request") + response = requests.post( + f'{BASE_URL}/api/v1/analyze/stop-all', + timeout=10 + ) + + LOGGER.info(f"Stop-all response status: {response.status_code}") + + # Parse JSON response + data = response.json() + LOGGER.info(f"Stop-all response body: {data}") + + # Validate response + assert response.status_code == 200, f"Expected status code 200, got {response.status_code}" + assert data['status'] == 'success', f"Expected status 'success', got '{data['status']}'" + assert 'stopped_count' in data, "Response missing 'stopped_count' field" + + # Verify that analyses were stopped + assert data['stopped_count'] > 0, "Expected at least one analysis to be stopped" + + LOGGER.info("Stop-all successful, verified all analyses stopped") + else: + LOGGER.warning("Could not start any analyses, skipping stop-all test") + pytest.skip("Could not start analyses to test stop-all functionality") + + LOGGER.info("Stop-all endpoint test passed!") + LOGGER.info("<<<<<< Finished test_case test_stop_all_analyses_endpoint") + + +# TODO: Add here test for notify endpoint from @blueprint.route('/notify', methods=['POST']) \ No newline at end of file diff --git a/src/tests/tools/simap_ai_engine/deploy.sh b/src/tests/tools/simap_ai_engine/deploy.sh deleted file mode 100755 index d5495a2fa5d18ad583d50f10ba8f75d6a41adb0f..0000000000000000000000000000000000000000 --- a/src/tests/tools/simap_ai_engine/deploy.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -REPO_ROOT="$(cd "${SCRIPT_DIR}/../../../.." && pwd)" - -echo "Building SIMAP DataStore..." -cd "${REPO_ROOT}" -docker buildx build -t simap-datastore:mock -f ./src/tests/tools/simap_datastore/Dockerfile . - -echo "Building NCE-FAN Controller..." -docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . - -echo "Building NCE-T Controller..." -docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . - -echo "Building AI Engine..." -docker buildx build -t ai-engine:latest -f ./src/tests/tools/simap_ai_engine/ai_engine/Dockerfile . - -# echo "Building Traffic Changer..." -# docker buildx build -t traffic-changer:mock -f ./src/tests/tools/traffic_changer/Dockerfile . - -echo "Deploying support services..." 
-docker run --detach --name simap-datastore --publish 8080:8080 simap-datastore:mock -docker run --detach --name nce-fan-ctrl --publish 8081:8080 \ - --env SIMAP_ADDRESS=172.17.0.1 \ - --env SIMAP_PORT=8080 \ - nce-fan-ctrl:mock -docker run --detach --name nce-t-ctrl --publish 8082:8080 \ - --env SIMAP_ADDRESS=172.17.0.1 \ - --env SIMAP_PORT=8080 \ - nce-t-ctrl:mock - -echo "Deploying AI Engine..." -docker run --detach --name ai-engine --publish 8084:8080 \ - --env SIMAP_DATASTORE_ADDRESS=172.17.0.1 \ - --env SIMAP_DATASTORE_PORT=8080 \ - --env SIMAP_DATASTORE_USERNAME=admin \ - --env SIMAP_DATASTORE_PASSWORD=admin \ - ai-engine:latest -# docker run --detach --name traffic-changer --publish 8083:8080 traffic-changer:mock - -sleep 2 -docker ps -a -echo "Deployment complete." diff --git a/src/tests/tools/simap_ai_engine/destroy.sh b/src/tests/tools/simap_ai_engine/destroy.sh index 6da3f984029846aa91bc9000e0e3f55a1644974d..1d72ad52ac5fab7d53d065a909b4a103fc4fadb4 100755 --- a/src/tests/tools/simap_ai_engine/destroy.sh +++ b/src/tests/tools/simap_ai_engine/destroy.sh @@ -16,10 +16,6 @@ set -euo pipefail echo "Cleaning up..." -docker rm --force simap-datastore 2>/dev/null || true -docker rm --force nce-fan-ctrl 2>/dev/null || true -docker rm --force nce-t-ctrl 2>/dev/null || true docker rm --force ai-engine 2>/dev/null || true -docker rm --force traffic-changer 2>/dev/null || true sleep 2 docker ps -a diff --git a/src/tests/tools/simap_datastore/simap_client/SimapClient.py b/src/tests/tools/simap_datastore/simap_client/SimapClient.py index 96237ba1202691e1dd044b0c84b40c5007bfae0a..3163f9ae72f11c3e1d18641cc278263de2d0b481 100644 --- a/src/tests/tools/simap_datastore/simap_client/SimapClient.py +++ b/src/tests/tools/simap_datastore/simap_client/SimapClient.py @@ -64,7 +64,8 @@ class TerminationPoint: class NodeTelemetry: - ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' + ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}' + # ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): self._restconf_client = restconf_client @@ -173,7 +174,8 @@ class Node: class LinkTelemetry: - ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' + ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}' + # ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): self._restconf_client = restconf_client diff --git a/src/tests/tools/simap_datastore/simap_client/__main__.py b/src/tests/tools/simap_datastore/simap_client/__main__.py index 4eda88decaeacbd9c01e29da799397d48763382f..65c2bacdaef014735cb29fd57ca1a59a490cfb5d 100644 --- a/src/tests/tools/simap_datastore/simap_client/__main__.py +++ b/src/tests/tools/simap_datastore/simap_client/__main__.py @@ -34,11 +34,18 @@ def main() -> None: simap_client = SimapClient(restconf_client) generator = SimapMetricsGenerator(service_count=5) - # ---> Only need to be created once in the lifetime of the SIMAP datastore <--- # - create_simap_te(simap_client) - create_simap_trans(simap_client) - create_simap_aggnet(simap_client) - create_simap_e2enet(simap_client) + try: + create_simap_te(simap_client) + create_simap_trans(simap_client) + create_simap_aggnet(simap_client) + 
create_simap_e2enet(simap_client) + except Exception as e: + error_msg = str(e) + if 'status_code=409' in error_msg or 'already exists' in error_msg.lower(): + LOGGER.warning('SIMAP topology already exists, skipping further creation requests.') + else: + LOGGER.error('Error creating SIMAP topology: %s', e) + return print('networks=', json.dumps(simap_client.networks())) @@ -91,13 +98,14 @@ def main() -> None: abstract_links[link_id].telemetry.update(bw, lat, related_service_ids=domain_service_map[link_id]) # Print telemetry summary - print(f'--- Iteration {i} | Services: {generator.service_count} ---') - for link_id, (bw, lat) in te_metrics.items(): - print(f'TE {link_id:4s}: BW={bw:5.2f}%, Lat={lat:.3f}ms SvcIDs: {te_service_ids}') - for link_id, (bw, lat) in abstract_metrics.items(): - print(f'{link_id:10s}: BW={bw:5.2f}%, Lat={lat:.3f}ms SvcIDs: {domain_service_map[link_id]}') - - time.sleep(5) + if i != 0 and i % 5 == 0: + print(f'--- Iteration {i} | Services: {generator.service_count} ---') + for link_id, (bw, lat) in te_metrics.items(): + print(f'TE {link_id:4s}: BW={bw:5.2f}%, Lat={lat:.3f}ms SvcIDs: {te_service_ids}') + for link_id, (bw, lat) in abstract_metrics.items(): + print(f'{link_id:10s}: BW={bw:5.2f}%, Lat={lat:.3f}ms SvcIDs: {domain_service_map[link_id]}') + + time.sleep(10) if __name__ == '__main__': diff --git a/src/tests/tools/simap_datastore/simap_datastore/TelemetryCallbacks.py b/src/tests/tools/simap_datastore/simap_datastore/TelemetryCallbacks.py index ad646b9a63722f6c88e0edb2fa76b4ac80d15b4a..1484b9307d9f28b2caa548ee2aecc012765e0a9a 100644 --- a/src/tests/tools/simap_datastore/simap_datastore/TelemetryCallbacks.py +++ b/src/tests/tools/simap_datastore/simap_datastore/TelemetryCallbacks.py @@ -101,12 +101,11 @@ class CallbackOnLinkTelemetry(_Callback): """ # Pattern matches: - # /restconf/data/ietf-network:networks/network=/ietf-network-topology:link=/simap-telemetry:simap-telemetry + # /restconf/data/ietf-network:networks/network=/ietf-network-topology:link= PATTERN = ( r'/restconf/data/ietf-network:networks' r'/network=(?P[^/]+)' r'/ietf-network-topology:link=(?P[^/]+)' - r'/simap-telemetry:simap-telemetry' ) def __init__(self, influx_client: SimapInfluxDBClient) -> None: @@ -119,7 +118,7 @@ class CallbackOnLinkTelemetry(_Callback): super().__init__(self.PATTERN) self._influx_client = influx_client - def execute( + def execute_data_update( self, match: re.Match, path: str, @@ -190,12 +189,11 @@ class CallbackOnNodeTelemetry(_Callback): """ # Pattern matches: - # /restconf/data/ietf-network:networks/network=/node=/simap-telemetry:simap-telemetry + # /restconf/data/ietf-network:networks/network=/node= PATTERN = ( r'/restconf/data/ietf-network:networks' r'/network=(?P[^/]+)' r'/node=(?P[^/]+)' - r'/simap-telemetry:simap-telemetry' ) def __init__(self, influx_client: SimapInfluxDBClient) -> None: @@ -208,7 +206,7 @@ class CallbackOnNodeTelemetry(_Callback): super().__init__(self.PATTERN) self._influx_client = influx_client - def execute( + def execute_data_update( self, match: re.Match, path: str, diff --git a/src/tests/tools/simap_datastore/simap_datastore/influxdb_client.py b/src/tests/tools/simap_datastore/simap_datastore/influxdb_client.py index 70dc566864c99ff84b3a27646268251ba1307b79..9e5b476192ece41a568feb2f9d86a06cfd76b39e 100644 --- a/src/tests/tools/simap_datastore/simap_datastore/influxdb_client.py +++ b/src/tests/tools/simap_datastore/simap_datastore/influxdb_client.py @@ -22,6 +22,8 @@ from typing import List, Optional from influxdb_client_3 import 
InfluxDBClient3, Point, WritePrecision
 
+from .Config import INFLUXDB_HOST, INFLUXDB_PORT, INFLUXDB_DATABASE, INFLUXDB_TOKEN
+
 LOGGER = logging.getLogger(__name__)
 
@@ -31,39 +33,62 @@ class SimapInfluxDBClient:
     Client wrapper for writing SIMAP telemetry data to InfluxDB 3.x.
     """
 
     def __init__(
         self,
-        host: str,
-        port: int,
-        token: str,
-        database: str
+        host: Optional[str] = None,
+        port: Optional[int] = None,
+        token: Optional[str] = None,
+        database: Optional[str] = None
     ) -> None:
         """
         Initialize the InfluxDB client.
 
         Args:
-            host: InfluxDB server hostname
-            port: InfluxDB server port
-            token: Authentication token
-            database: Database/bucket name
+            host: InfluxDB server hostname (default: from INFLUXDB_HOST env or 'localhost')
+            port: InfluxDB server port (default: from INFLUXDB_PORT env or 8181)
+            token: Authentication token (default: from INFLUXDB_TOKEN env)
+            database: Database/bucket name (default: from INFLUXDB_DATABASE env or 'simap_telemetry')
         """
-        self._host = host
-        self._port = port
-        self._database = database
+        self._host = host if host is not None else INFLUXDB_HOST
+        self._port = port if port is not None else INFLUXDB_PORT
+        self._database = database if database is not None else INFLUXDB_DATABASE
+        self._token = token if token is not None else INFLUXDB_TOKEN
 
         self._client: Optional[InfluxDBClient3] = None
         try:
             self._client = InfluxDBClient3(
-                token    = token,
-                host     = f"http://{host}:{port}",
-                database = database
+                token    = self._token,
+                host     = f"http://{self._host}:{self._port}",
+                database = self._database
             )
             LOGGER.info("InfluxDB client initialized: host=%s:%d, database=%s",
                         self._host, self._port, self._database)
+
+            # Test the connection
+            if not self._test_connection():
+                LOGGER.error("InfluxDB client initialized but connection test failed")
+                self._client = None
+            else:
+                LOGGER.info("InfluxDB connection test successful")
+
         except Exception as e:  # pylint: disable=broad-except
             LOGGER.error("Failed to initialize InfluxDB client: %s", str(e))
             self._client = None
 
+    def _test_connection(self) -> bool:
+        """
+        Test the InfluxDB connection by attempting a simple system query.
+
+        Returns:
+            True if the connection is accessible, False otherwise
+        """
+        if self._client is None:
+            LOGGER.warning("InfluxDB client not initialized, cannot test connection")
+            return False
+
+        try:
+            query = "SHOW TABLES"
+            self._client.query(query=query, language="sql")
+            return True
+        except Exception as e:  # pylint: disable=broad-except
+            LOGGER.error("InfluxDB connection test failed: %s", str(e))
+            return False
+
     def is_connected(self) -> bool:
         """Check if client is initialized."""
         return self._client is not None
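Given the new env-driven defaults and the startup probe, constructing the client can now be parameterless; a minimal sketch (module path inferred from this repo layout):

```python
from simap_datastore.influxdb_client import SimapInfluxDBClient

# Relies entirely on Config defaults (INFLUXDB_HOST/PORT/TOKEN/DATABASE env vars).
client = SimapInfluxDBClient()
if client.is_connected():
    print("InfluxDB reachable: SHOW TABLES probe succeeded")
else:
    print("InfluxDB unreachable: writes will be skipped")
```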