diff --git a/deploy/tfs.sh b/deploy/tfs.sh index 51fef3ad564fd3711458890e744b34bda2a65500..a67cbeafc9edb6edf5aa6f5bfe1bc027d2099028 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -494,11 +494,11 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID} echo - # Dashboard: Device ConfigureDevice Details - curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_device_config_exec_details.json' \ + # Dashboard: Device Execution Details + curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_device_exec_details.json' \ ${GRAFANA_URL_UPDATED}/api/dashboards/db echo - DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-dev-confdev" + DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-dev-exec" DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id') curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID} echo diff --git a/src/compute/tests/test_slice.py b/src/compute/tests/test_slice.py new file mode 100644 index 0000000000000000000000000000000000000000..61f286eb74a876fa02546fc2bf1dcd8f092e718a --- /dev/null +++ b/src/compute/tests/test_slice.py @@ -0,0 +1,125 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json, random, uuid +from typing import Dict, Tuple +from compute.service.rest_server.nbi_plugins.ietf_network_slice.bindings.network_slice_services import ( + NetworkSliceServices +) + +# R1 emulated devices +# Port 13-0 is Optical +# Port 13-1 is Copper +R1_UUID = "ed2388eb-5fb9-5888-a4f4-160267d3e19b" +R1_PORT_13_0_UUID_OPTICAL = "20440915-1a6c-5e7b-a80f-b0e0e51f066d" +R1_PORT_13_1_UUID_COPPER = "ff900d5d-2ac0-576c-9628-a2d016681f9d" + +# R2 emulated devices +# Port 13-0 is Optical +# Port 13-1 is Copper +R2_UUID = "49ce0312-1274-523b-97b8-24d0eca2d72d" +R2_PORT_13_0_UUID_OPTICAL = "214618cb-b63b-5e66-84c2-45c1c016e5f0" +R2_PORT_13_1_UUID_COPPER = "4e0f7fb4-5d22-56ad-a00e-20bffb4860f9" + +# R3 emulated devices +# Port 13-0 is Optical +# Port 13-1 is Copper +R3_UUID = "3bc8e994-a3b9-5f60-9c77-6608b1d08313" +R3_PORT_13_0_UUID_OPTICAL = "da5196f5-d651-5def-ada6-50ed6430279d" +R3_PORT_13_1_UUID_COPPER = "43d221fa-5701-5740-a129-502131f5bda2" + +# R4 emulated devices +# Port 13-0 is Optical +# Port 13-1 is Copper +R4_UUID = "b43e6361-2573-509d-9a88-1793e751b10d" +R4_PORT_13_0_UUID_OPTICAL = "241b74a7-8677-595c-ad65-cc9093c1e341" +R4_PORT_13_1_UUID_COPPER = "c57abf46-caaf-5954-90cc-1fec0a69330e" + +node_dict = {R1_PORT_13_1_UUID_COPPER: R1_UUID, + R2_PORT_13_1_UUID_COPPER: R2_UUID, + R3_PORT_13_1_UUID_COPPER: R3_UUID, + R4_PORT_13_1_UUID_COPPER: R4_UUID} +list_endpoints = [R1_PORT_13_1_UUID_COPPER, + R2_PORT_13_1_UUID_COPPER, + R3_PORT_13_1_UUID_COPPER, + R4_PORT_13_1_UUID_COPPER] + +list_availability= [99, 99.9, 99.99, 99.999, 99.9999] +list_bw = [10, 40, 50, 100, 150, 200, 400] +list_owner = ["Telefonica", "CTTC", "Telenor", "ADVA", "Ubitech", "ATOS"] + +URL_POST = "/restconf/data/ietf-network-slice-service:ietf-nss/network-slice-services" +URL_DELETE = "/restconf/data/ietf-network-slice-service:ietf-nss/network-slice-services/slice-service=" + +def generate_request(seed: str) -> Tuple[Dict, str]: + + ns = NetworkSliceServices() + + # Slice 1 + suuid = 
str(uuid.uuid5(uuid.NAMESPACE_DNS, str(seed))) + slice1 = ns.slice_service[suuid] + slice1.service_description = "Test slice for OFC 2023 demo" + slice1.status().admin_status().status = "Planned" # TODO not yet mapped + + # SDPS: R1 optical to R3 optical + sdps1 = slice1.sdps().sdp + while True: + ep1_uuid = random.choice(list_endpoints) + ep2_uuid = random.choice(list_endpoints) + if ep1_uuid != ep2_uuid: + break + + sdps1[ep1_uuid].node_id = node_dict.get(ep1_uuid) + sdps1[ep2_uuid].node_id = node_dict.get(ep2_uuid) + + # Connectivity group: Connection construct and 2 sla constrains: + # - Bandwidth + # - Availability + cg_uuid = str(uuid.uuid4()) + cg = slice1.connection_groups().connection_group + cg1 = cg[cg_uuid] + + cc1 = cg1.connectivity_construct[0] + cc1.cc_id = 5 + p2p = cc1.connectivity_construct_type.p2p() + p2p.p2p_sender_sdp = ep1_uuid + p2p.p2p_receiver_sdp = ep2_uuid + + slo_custom = cc1.slo_sle_policy.custom() + metric_bounds = slo_custom.service_slo_sle_policy().metric_bounds().metric_bound + + # SLO Bandwidth + slo_bandwidth = metric_bounds["service-slo-two-way-bandwidth"] + slo_bandwidth.value_description = "Guaranteed bandwidth" + slo_bandwidth.bound = int(random.choice(list_bw)) + slo_bandwidth.metric_unit = "Gbps" + + # SLO Availability + slo_availability = metric_bounds["service-slo-availability"] + slo_availability.value_description = "Guaranteed availability" + slo_availability.metric_unit = "percentage" + slo_availability.bound = random.choice(list_availability) + + json_request = {"data": ns.to_json()} + + #Last, add name and owner manually + list_name_owner = [{"tag-type": "owner", "value": random.choice(list_owner)}] + json_request["data"]["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["service-tags"] = list_name_owner + + return (json_request, suuid) + + +if __name__ == "__main__": + request = generate_request(123) + print(json.dumps(request[0], sort_keys=True, indent=4)) diff --git 
a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index e7fec041802cc661b14617a8ebfec0864c738b39..38a6b735b32ee667c3be2f5381df84c40d773c06 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -37,8 +37,8 @@ LOGGER = logging.getLogger(__name__) METRICS_POOL = MetricsPool('Device', 'RPC') -METRICS_POOL_DETAILS = MetricsPool('Device', 'exec_details', labels={ - 'step_name': '', +METRICS_POOL_DETAILS = MetricsPool('Device', 'execution', labels={ + 'driver': '', 'operation': '', 'step': '', }) class DeviceServiceServicerImpl(DeviceServiceServicer): @@ -51,11 +51,15 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def AddDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: + t0 = time.time() + device_uuid = request.device_id.device_uuid.uuid connection_config_rules = check_connect_rules(request.device_config) check_no_endpoints(request.device_endpoints) + t1 = time.time() + context_client = ContextClient() device = get_device(context_client, device_uuid, rw_copy=True) if device is None: @@ -73,10 +77,15 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): # update device_uuid to honor UUID provided by Context device_uuid = device.device_id.device_uuid.uuid + t2 = time.time() + self.mutex_queues.wait_my_turn(device_uuid) + t3 = time.time() try: driver : _Driver = get_driver(self.driver_instance_cache, device) + t4 = time.time() + errors = [] # Sub-devices and sub-links are exposed by intermediate controllers or represent mgmt links. 
@@ -86,13 +95,23 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): new_sub_links : Dict[str, Link] = dict() if len(device.device_endpoints) == 0: + t5 = time.time() # created from request, populate endpoints using driver errors.extend(populate_endpoints( device, driver, self.monitoring_loops, new_sub_devices, new_sub_links)) + t6 = time.time() + t_pop_endpoints = t6 - t5 + else: + t_pop_endpoints = None if len(device.device_config.config_rules) == len(connection_config_rules): # created from request, populate config rules using driver + t7 = time.time() errors.extend(populate_config_rules(device, driver)) + t8 = time.time() + t_pop_config_rules = t8 - t7 + else: + t_pop_config_rules = None # TODO: populate components @@ -100,22 +119,60 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): for error in errors: LOGGER.error(error) raise OperationFailedException('AddDevice', extra_details=errors) + t9 = time.time() + device.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED device_id = context_client.SetDevice(device) + t10 = time.time() + for sub_device in new_sub_devices.values(): context_client.SetDevice(sub_device) + t11 = time.time() + for sub_links in new_sub_links.values(): context_client.SetLink(sub_links) + t12 = time.time() + # Update endpoint monitoring resources with UUIDs device_with_uuids = get_device( context_client, device_id.device_uuid.uuid, rw_copy=False, include_endpoints=True, include_components=False, include_config_rules=False) populate_endpoint_monitoring_resources(device_with_uuids, self.monitoring_loops) + t13 = time.time() + context_client.close() + + t14 = time.time() + + metrics_labels = dict(driver=driver.name, operation='add_device') + + histogram_duration : Histogram = METRICS_POOL_DETAILS.get_or_create( + 'details', MetricTypeEnum.HISTOGRAM_DURATION) + histogram_duration.labels(step='total' , **metrics_labels).observe(t14-t0) + histogram_duration.labels(step='execution' , 
**metrics_labels).observe(t14-t3) + histogram_duration.labels(step='endpoint_checks' , **metrics_labels).observe(t1-t0) + histogram_duration.labels(step='get_device' , **metrics_labels).observe(t2-t1) + histogram_duration.labels(step='wait_queue' , **metrics_labels).observe(t3-t2) + histogram_duration.labels(step='get_driver' , **metrics_labels).observe(t4-t3) + histogram_duration.labels(step='set_device' , **metrics_labels).observe(t10-t9) + histogram_duration.labels(step='populate_monit_rsrc', **metrics_labels).observe(t13-t12) + + if t_pop_endpoints is not None: + histogram_duration.labels(step='populate_endpoints', **metrics_labels).observe(t_pop_endpoints) + + if t_pop_config_rules is not None: + histogram_duration.labels(step='populate_config_rules', **metrics_labels).observe(t_pop_config_rules) + + if len(new_sub_devices) > 0: + histogram_duration.labels(step='set_sub_devices', **metrics_labels).observe(t11-t10) + + if len(new_sub_links) > 0: + histogram_duration.labels(step='set_sub_links', **metrics_labels).observe(t12-t11) + return device_id finally: self.mutex_queues.signal_done(device_uuid) @@ -195,16 +252,18 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): t9 = time.time() + metrics_labels = dict(driver=driver.name, operation='configure_device') + histogram_duration : Histogram = METRICS_POOL_DETAILS.get_or_create( - 'ConfigureDevice', MetricTypeEnum.HISTOGRAM_DURATION) - histogram_duration.labels(step_name='total' ).observe(t9-t0) - histogram_duration.labels(step_name='wait_queue' ).observe(t1-t0) - histogram_duration.labels(step_name='execution' ).observe(t9-t1) - histogram_duration.labels(step_name='get_device' ).observe(t3-t2) - histogram_duration.labels(step_name='split_rules' ).observe(t5-t4) - histogram_duration.labels(step_name='configure_rules' ).observe(t6-t5) - histogram_duration.labels(step_name='deconfigure_rules').observe(t7-t6) - histogram_duration.labels(step_name='set_device' ).observe(t9-t8) + 'details', 
MetricTypeEnum.HISTOGRAM_DURATION) + histogram_duration.labels(step='total' , **metrics_labels).observe(t9-t0) + histogram_duration.labels(step='wait_queue' , **metrics_labels).observe(t1-t0) + histogram_duration.labels(step='execution' , **metrics_labels).observe(t9-t1) + histogram_duration.labels(step='get_device' , **metrics_labels).observe(t3-t2) + histogram_duration.labels(step='split_rules' , **metrics_labels).observe(t5-t4) + histogram_duration.labels(step='configure_rules' , **metrics_labels).observe(t6-t5) + histogram_duration.labels(step='deconfigure_rules', **metrics_labels).observe(t7-t6) + histogram_duration.labels(step='set_device' , **metrics_labels).observe(t9-t8) return device_id finally: diff --git a/src/device/service/driver_api/_Driver.py b/src/device/service/driver_api/_Driver.py index 947bc8570a941f8f666c87647d89c315b1bd202a..7adaec79dc99f9b7c836acaec886b0d5bda97fb8 100644 --- a/src/device/service/driver_api/_Driver.py +++ b/src/device/service/driver_api/_Driver.py @@ -27,7 +27,7 @@ RESOURCE_ACL = '__acl__' class _Driver: - def __init__(self, address: str, port: int, **settings) -> None: + def __init__(self, name : str, address: str, port: int, **settings) -> None: """ Initialize Driver. Parameters: address : str @@ -37,7 +37,22 @@ class _Driver: **settings Extra settings required by the driver. """ - raise NotImplementedError() + self._name = name + self._address = address + self._port = port + self._settings = settings + + @property + def name(self): return self._name + + @property + def address(self): return self._address + + @property + def port(self): return self._port + + @property + def settings(self): return self._settings def Connect(self) -> bool: """ Connect to the Device. 
diff --git a/src/device/service/drivers/emulated/EmulatedDriver.py b/src/device/service/drivers/emulated/EmulatedDriver.py index 2acb288784d6da5b202f14c2534ee1a59486a20e..8f9453574a7333e599ea56158204627fcfdd3680 100644 --- a/src/device/service/drivers/emulated/EmulatedDriver.py +++ b/src/device/service/drivers/emulated/EmulatedDriver.py @@ -31,16 +31,18 @@ LOGGER = logging.getLogger(__name__) RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r'^\/interface\[([^\]]+)\].*') -METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'emulated'}) +DRIVER_NAME = 'emulated' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class EmulatedDriver(_Driver): - def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called + def __init__(self, address : str, port : int, **settings) -> None: + super().__init__(DRIVER_NAME, address, port, **settings) self.__lock = threading.Lock() self.__initial = TreeNode('.') self.__running = TreeNode('.') self.__subscriptions = TreeNode('.') - endpoints = settings.get('endpoints', []) + endpoints = self.settings.get('endpoints', []) endpoint_resources = [] for endpoint in endpoints: endpoint_resource = compose_resource_endpoint(endpoint) diff --git a/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py b/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py index 96dfd2c15f6b359e254a6d6a24dfe42a546833ce..9498dc84cc6991fd2295371842fa8508c961f1bc 100644 --- a/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py +++ b/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py @@ -39,21 +39,23 @@ ALL_RESOURCE_KEYS = [ SERVICE_TYPE = 'ELINE' -METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'ietf_l2vpn'}) +DRIVER_NAME = 'ietf_l2vpn' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class IetfL2VpnDriver(_Driver): - def __init__(self, address: str, port: int, **settings) -> None: # pylint: disable=super-init-not-called + 
def __init__(self, address: str, port: int, **settings) -> None: + super().__init__(DRIVER_NAME, address, port, **settings) self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - username = settings.get('username') - password = settings.get('password') - scheme = settings.get('scheme', 'http') - wim = {'wim_url': '{:s}://{:s}:{:d}'.format(scheme, address, int(port))} + username = self.settings.get('username') + password = self.settings.get('password') + scheme = self.settings.get('scheme', 'http') + wim = {'wim_url': '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port))} wim_account = {'user': username, 'password': password} # Mapping updated dynamically with each request config = {'mapping_not_needed': False, 'service_endpoint_mapping': []} - self.dac = TfsDebugApiClient(address, int(port), scheme=scheme, username=username, password=password) + self.dac = TfsDebugApiClient(self.address, int(self.port), scheme=scheme, username=username, password=password) self.wim = WimconnectorIETFL2VPN(wim, wim_account, config=config) self.conn_info = {} # internal database emulating OSM storage provided to WIM Connectors diff --git a/src/device/service/drivers/microwave/IETFApiDriver.py b/src/device/service/drivers/microwave/IETFApiDriver.py index fad7cd0736ec35c5675461af241b2e7de2295dac..a8ef9094652378df8d1f1a55868849316b7ec95b 100644 --- a/src/device/service/drivers/microwave/IETFApiDriver.py +++ b/src/device/service/drivers/microwave/IETFApiDriver.py @@ -23,20 +23,22 @@ from .Tools import create_connectivity_service, find_key, config_getter, delete_ LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'microwave'}) +DRIVER_NAME = 'microwave' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class IETFApiDriver(_Driver): - def __init__(self, address: str, port: int, **settings) -> None: # pylint: disable=super-init-not-called + def 
__init__(self, address: str, port: int, **settings) -> None: + super().__init__(DRIVER_NAME, address, port, **settings) self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - username = settings.get('username') - password = settings.get('password') + username = self.settings.get('username') + password = self.settings.get('password') self.__auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None - scheme = settings.get('scheme', 'http') - self.__ietf_root = '{:s}://{:s}:{:d}'.format(scheme, address, int(port)) - self.__timeout = int(settings.get('timeout', 120)) - self.__node_ids = set(settings.get('node_ids', [])) + scheme = self.settings.get('scheme', 'http') + self.__ietf_root = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port)) + self.__timeout = int(self.settings.get('timeout', 120)) + self.__node_ids = set(self.settings.get('node_ids', [])) def Connect(self) -> bool: url = self.__ietf_root + '/nmswebs/restconf/data/ietf-network:networks' diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py index 2399b9ac01258a21a4da6a9aa0e5bc09ea851951..ac67c4ab0d314adb3ce2af0aaffeda18e67334fc 100644 --- a/src/device/service/drivers/openconfig/OpenConfigDriver.py +++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py @@ -235,11 +235,13 @@ def edit_config( results = [e for _ in resources] # if commit fails, set exception in each resource return results -METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'openconfig'}) +DRIVER_NAME = 'openconfig' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class OpenConfigDriver(_Driver): - def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called - self.__logger = logging.getLogger('{:s}:[{:s}:{:s}]'.format(str(__name__), str(address), str(port))) + def 
__init__(self, address : str, port : int, **settings) -> None: + super().__init__(DRIVER_NAME, address, port, **settings) + self.__logger = logging.getLogger('{:s}:[{:s}:{:s}]'.format(str(__name__), str(self.address), str(self.port))) self.__lock = threading.Lock() #self.__initial = TreeNode('.') #self.__running = TreeNode('.') @@ -249,11 +251,11 @@ class OpenConfigDriver(_Driver): self.__scheduler = BackgroundScheduler(daemon=True) # scheduler used to emulate sampling events self.__scheduler.configure( jobstores = {'default': MemoryJobStore()}, - executors = {'default': ThreadPoolExecutor(max_workers=1)}, + executors = {'default': ThreadPoolExecutor(max_workers=1)}, # important! 1 = avoid concurrent requests job_defaults = {'coalesce': False, 'max_instances': 3}, timezone=pytz.utc) self.__out_samples = queue.Queue() - self.__netconf_handler : NetconfSessionHandler = NetconfSessionHandler(address, port, **settings) + self.__netconf_handler = NetconfSessionHandler(self.address, self.port, **(self.settings)) self.__samples_cache = SamplesCache(self.__netconf_handler, self.__logger) def Connect(self) -> bool: diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py index de47f49c05b0f344999382883233a12eceb43c1b..9577b9dad436929d9d9ee1804bcac47cf5c26f91 100644 --- a/src/device/service/drivers/p4/p4_driver.py +++ b/src/device/service/drivers/p4/p4_driver.py @@ -41,7 +41,8 @@ except ImportError: LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'p4'}) +DRIVER_NAME = 'p4' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class P4Driver(_Driver): """ @@ -80,7 +81,7 @@ class P4Driver(_Driver): self.__endpoint = None self.__settings = settings self.__id = None - self.__name = None + self.__name = DRIVER_NAME self.__vendor = P4_VAL_DEF_VENDOR self.__hw_version = P4_VAL_DEF_HW_VER self.__sw_version = P4_VAL_DEF_SW_VER diff --git 
a/src/device/service/drivers/transport_api/TransportApiDriver.py b/src/device/service/drivers/transport_api/TransportApiDriver.py index 1991a34d0d797c48b6c2296435c0ebd0f3a8125a..98ed8e6aae613ea45519143c89e72af32f3b2620 100644 --- a/src/device/service/drivers/transport_api/TransportApiDriver.py +++ b/src/device/service/drivers/transport_api/TransportApiDriver.py @@ -23,19 +23,21 @@ from .Tools import create_connectivity_service, find_key, config_getter, delete_ LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'transport_api'}) +DRIVER_NAME = 'transport_api' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class TransportApiDriver(_Driver): - def __init__(self, address: str, port: int, **settings) -> None: # pylint: disable=super-init-not-called + def __init__(self, address: str, port: int, **settings) -> None: + super().__init__(DRIVER_NAME, address, port, **settings) self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - username = settings.get('username') - password = settings.get('password') + username = self.settings.get('username') + password = self.settings.get('password') self.__auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None - scheme = settings.get('scheme', 'http') - self.__tapi_root = '{:s}://{:s}:{:d}'.format(scheme, address, int(port)) - self.__timeout = int(settings.get('timeout', 120)) + scheme = self.settings.get('scheme', 'http') + self.__tapi_root = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port)) + self.__timeout = int(self.settings.get('timeout', 120)) def Connect(self) -> bool: url = self.__tapi_root + '/restconf/data/tapi-common:context' diff --git a/src/device/service/drivers/xr/XrDriver.py b/src/device/service/drivers/xr/XrDriver.py index c1471a8136b0e5cd7791e019bb0bdafd2252f591..46269ff8904a0e20dbcb08202220412e64cb6283 100644 --- 
a/src/device/service/drivers/xr/XrDriver.py +++ b/src/device/service/drivers/xr/XrDriver.py @@ -33,21 +33,23 @@ urllib3.disable_warnings() LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'xr'}) +DRIVER_NAME = 'xr' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class XrDriver(_Driver): - def __init__(self, address: str, port: int, **settings) -> None: # pylint: disable=super-init-not-called + def __init__(self, address: str, port: int, **settings) -> None: + super().__init__(DRIVER_NAME, address, port, **settings) self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - self.__timeout = int(settings.get('timeout', 120)) - self.__cm_address = address + self.__timeout = int(self.settings.get('timeout', 120)) + self.__cm_address = self.address # Mandatory key, an exception will get thrown if missing - self.__hub_module_name = settings["hub_module_name"] + self.__hub_module_name = self.settings["hub_module_name"] tls_verify = False # Currently using self signed certificates - username = settings.get("username", "xr-user-1") - password = settings.get("password", "xr-user-1") + username = self.settings.get("username", "xr-user-1") + password = self.settings.get("password", "xr-user-1") # Options are: # disabled --> just import endpoints as usual @@ -55,7 +57,7 @@ class XrDriver(_Driver): # (a remotely-controlled transport domain might exist between them) # topology --> imports sub-devices and links connecting them. 
# (not supported by XR driver) - self.__import_topology = get_import_topology(settings, default=ImportTopologyEnum.DISABLED) + self.__import_topology = get_import_topology(self.settings, default=ImportTopologyEnum.DISABLED) # Options are: # asynchronous --> operation considered complete when IPM responds with suitable status code, @@ -64,12 +66,12 @@ class XrDriver(_Driver): # lifecycle --> operation is considered successfull once IPM has completed pluggaable configuration # or failed in it. This is typically unsuitable for production use # (as some optics may be transiently unreachable), but is convenient for demos and testin. - consistency_mode = ConsistencyMode.from_str(settings.get("consistency-mode", "asynchronous")) + consistency_mode = ConsistencyMode.from_str(self.settings.get("consistency-mode", "asynchronous")) - self.__cm_connection = CmConnection(address, int(port), username, password, self.__timeout, tls_verify = tls_verify, consistency_mode=consistency_mode) + self.__cm_connection = CmConnection(self.address, int(self.port), username, password, self.__timeout, tls_verify = tls_verify, consistency_mode=consistency_mode) self.__constellation = None - LOGGER.info(f"XrDriver instantiated, cm {address}:{port}, consistency mode {str(consistency_mode)}, {settings=}") + LOGGER.info(f"XrDriver instantiated, cm {self.address}:{self.port}, consistency mode {str(consistency_mode)}, {self.settings=}") def __str__(self): return f"{self.__hub_module_name}@{self.__cm_address}" diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py index 974ce6f130e9c81f273f418b0a1440d148fcfb74..fdd400a2110fd4a75d6f9e8cc4820bc943eef423 100644 --- a/src/load_generator/load_gen/RequestGenerator.py +++ b/src/load_generator/load_gen/RequestGenerator.py @@ -39,14 +39,6 @@ ROUTER_ID = { 'R149': '5.5.5.5', 'R155': '5.5.5.1', 'R199': '5.5.5.6', - -} - -VIRTUAL_CIRCUIT = { - 'R149': '5.5.5.5', - 'R155': '5.5.5.1', - 'R199': '5.5.5.6', - } 
class RequestGenerator: @@ -269,8 +261,8 @@ class RequestGenerator: src_device_name = self._device_data[src_device_uuid]['name'] src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name'] - src_router_id = ROUTER_ID.get(src_device_name) src_router_num = int(re.findall(r'^\D*(\d+)', src_device_name)[0]) + src_router_id = ROUTER_ID.get(src_device_name) if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num) dst_device_name = self._device_data[dst_device_uuid]['name'] @@ -322,8 +314,8 @@ class RequestGenerator: src_device_name = self._device_data[src_device_uuid]['name'] src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name'] - src_router_id = ROUTER_ID.get(src_device_name) src_router_num = int(re.findall(r'^\D*(\d+)', src_device_name)[0]) + src_router_id = ROUTER_ID.get(src_device_name) if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num) src_address_ip = '10.{:d}.{:d}.{:d}'.format(x, y, src_router_num) diff --git a/src/monitoring/service/MetricsDBTools.py b/src/monitoring/service/MetricsDBTools.py index f928f07b94c71fb6f378161862e96d41af8bde7f..11574e8f6577db0bab4add96da8157496d40e6f5 100644 --- a/src/monitoring/service/MetricsDBTools.py +++ b/src/monitoring/service/MetricsDBTools.py @@ -13,6 +13,7 @@ # limitations under the License. import time +import math from random import random from questdb.ingress import Sender, IngressError @@ -326,4 +327,4 @@ class MetricsDB(): else: LOGGER.debug(f"No new data for the alarm of KPI {kpi_id}") except (Exception) as e: - LOGGER.debug(f"Alarm data cannot be retrieved. {e}") \ No newline at end of file + LOGGER.debug(f"Alarm data cannot be retrieved. 
{e}") diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py index 40cb0857617983df4cfd926baebcbff85e169894..8ffdfaf3ed9d35b52e9c262a980e6e8e8fd234af 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py @@ -67,6 +67,7 @@ def convert_explicit_path_hops_to_connections( prv_res_class : Tuple[Optional[int], Optional[DeviceTypeEnum], Optional[str]] = None, None, None for path_hop in path_hops: + LOGGER.debug('path_hop={:s}'.format(str(path_hop))) device_uuid = path_hop['device'] if prv_device_uuid == device_uuid: continue device_tuple = device_dict.get(device_uuid) @@ -74,24 +75,33 @@ def convert_explicit_path_hops_to_connections( _,grpc_device = device_tuple res_class = get_resource_classification(grpc_device, device_dict) - if res_class[1] in IGNORED_DEVICE_TYPES: continue + LOGGER.debug(' prv_res_class={:s}'.format(str(prv_res_class))) + LOGGER.debug(' res_class={:s}'.format(str(res_class))) + if res_class[1] in IGNORED_DEVICE_TYPES: + LOGGER.debug(' ignored') + continue if prv_res_class[0] is None: # path ingress + LOGGER.debug(' path ingress') connection_stack.put((main_service_uuid, main_service_type, [path_hop], [])) elif prv_res_class[0] > res_class[0]: # create underlying connection + LOGGER.debug(' create underlying connection') connection_uuid = str(uuid.uuid4()) prv_service_type = connection_stack.queue[-1][1] service_type = get_service_type(res_class[1], prv_service_type) connection_stack.put((connection_uuid, service_type, [path_hop], [])) elif prv_res_class[0] == res_class[0]: # same resource group kind + LOGGER.debug(' same resource group kind') if prv_res_class[1] == res_class[1] and prv_res_class[2] == res_class[2]: # same device type and device controller: connection continues + LOGGER.debug(' connection continues') 
connection_stack.queue[-1][2].append(path_hop) else: # different device type or device controller: chain connections + LOGGER.debug(' chain connections') connection = connection_stack.get() connections.append(connection) connection_stack.queue[-1][3].append(connection[0]) @@ -102,6 +112,7 @@ def convert_explicit_path_hops_to_connections( connection_stack.put((connection_uuid, service_type, [path_hop], [])) elif prv_res_class[0] < res_class[0]: # underlying connection ended + LOGGER.debug(' underlying connection ended') connection = connection_stack.get() connections.append(connection) connection_stack.queue[-1][3].append(connection[0]) @@ -113,6 +124,7 @@ def convert_explicit_path_hops_to_connections( prv_res_class = res_class # path egress + LOGGER.debug(' path egress') connections.append(connection_stack.get()) LOGGER.debug('connections={:s}'.format(str(connections))) assert connection_stack.empty() diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py index 53c89cd124cb7d3431b37a50596b0b793cfa83eb..e56d436dd006197497d7774be598a480a134320c 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py @@ -33,12 +33,12 @@ DEVICE_TYPE_TO_DEEPNESS = { DeviceTypeEnum.EMULATED_P4_SWITCH.value : 60, DeviceTypeEnum.P4_SWITCH.value : 60, - DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 40, - DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value : 40, - DeviceTypeEnum.EMULATED_XR_CONSTELLATION.value : 40, DeviceTypeEnum.XR_CONSTELLATION.value : 40, + DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 30, + DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value : 30, + DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value : 30, DeviceTypeEnum.OPEN_LINE_SYSTEM.value : 30, diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index 
96751e83770e1b98df4770cf74bb453f6a0519ef..acda45ce80a62a4a3723744546968e3195799b59 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -12,23 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json +import json, logging from enum import Enum from typing import TYPE_CHECKING, Any, Dict, Optional, Union from common.method_wrappers.ServiceExceptions import NotFoundException -from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId +from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceDriverEnum, DeviceId, Service, ServiceId from common.tools.context_queries.Connection import get_connection_by_id from common.tools.context_queries.Device import get_device from common.tools.context_queries.Service import get_service_by_id +from common.tools.grpc.Tools import grpc_message_list_to_json_string from common.tools.object_factory.Device import json_device_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient +from service.service.service_handler_api.Exceptions import ( + UnsatisfiedFilterException, UnsupportedFilterFieldException, UnsupportedFilterFieldValueException) from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory, get_service_handler_class from service.service.tools.ObjectKeys import get_connection_key, get_device_key, get_service_key if TYPE_CHECKING: from service.service.service_handler_api._ServiceHandler import _ServiceHandler +LOGGER = logging.getLogger(__name__) + CacheableObject = Union[Connection, Device, Service] class CacheableObjectType(Enum): @@ -169,5 +174,21 @@ class TaskExecutor: self, connection : Connection, service : Service, **service_handler_settings ) -> '_ServiceHandler': connection_devices = self.get_devices_from_connection(connection, 
exclude_managed_by_controller=True) - service_handler_class = get_service_handler_class(self._service_handler_factory, service, connection_devices) - return service_handler_class(service, self, **service_handler_settings) + try: + service_handler_class = get_service_handler_class( + self._service_handler_factory, service, connection_devices) + return service_handler_class(service, self, **service_handler_settings) + except (UnsatisfiedFilterException, UnsupportedFilterFieldException, UnsupportedFilterFieldValueException): + dict_connection_devices = { + cd_data.name : (cd_uuid, cd_data.name, { + (device_driver, DeviceDriverEnum.Name(device_driver)) + for device_driver in cd_data.device_drivers + }) + for cd_uuid,cd_data in connection_devices.items() + } + LOGGER.exception( + 'Unable to select service handler. service={:s} connection={:s} connection_devices={:s}'.format( + grpc_message_list_to_json_string(service), grpc_message_list_to_json_string(connection), + str(dict_connection_devices) + ) + ) diff --git a/src/tests/p4/setup.sh b/src/tests/p4/setup.sh index 78e7f7372d911623cd541495ab15ad3cd548c3ef..a98ad31ab159217c209d5077b258fd398d5113cd 100755 --- a/src/tests/p4/setup.sh +++ b/src/tests/p4/setup.sh @@ -16,7 +16,7 @@ export POD_NAME=$(kubectl get pods -n=tfs | grep device | awk '{print $1}') -kubectl exec ${POD_NAME} -n=tfs -- mkdir /root/p4 +kubectl exec ${POD_NAME} -n=tfs -c=server -- mkdir /root/p4 -kubectl cp src/tests/p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4 -kubectl cp src/tests/p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4 +kubectl cp src/tests/p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4 -c=server +kubectl cp src/tests/p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4 -c=server diff --git a/src/tests/tools/perf_plots/Component_RPC_Methods.py b/src/tests/tools/perf_plots/Component_RPC_Methods.py new file mode 100644 index 0000000000000000000000000000000000000000..7aa3ed304bc7d923a5ba634917fd95c28aea513b --- /dev/null +++ 
b/src/tests/tools/perf_plots/Component_RPC_Methods.py @@ -0,0 +1,123 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime, re +from typing import Dict, List, Optional, Tuple +from .tools.FileSystem import create_folders +from .tools.HistogramData import HistogramData +from .tools.Plotter import plot_histogram +from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names +from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms + +##### EXPERIMENT SETTINGS ############################################################################################## + +EXPERIMENT_NAME = 'L2VPN with Emulated' +EXPERIMENT_ID = 'l2vpn-emu' +TIME_START = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc) +TIME_END = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc) +TIME_STEP = '1m' +LABEL_FILTERS = {} + +##### ENVIRONMENT SETTINGS ############################################################################################# + +PROM_ADDRESS = '127.0.0.1' +PROM_PORT = 9090 +OUT_FOLDER = 'data/perf/' + +##### PLOT-SPECIFIC CUSTOMIZATIONS ##################################################################################### + +EXPERIMENT_ID += '/component-rpcs' +SERIES_MATCH = 'tfs_.+_rpc_.+_histogram_duration_bucket' +RE_SERIES_NAME = re.compile(r'^tfs_(.+)_rpc_(.+)_histogram_duration_bucket$') 
+SERIES_LABELS = [] + +SUBSYSTEMS_MAPPING = { + 'context': { + 'context' : 'context', + 'topolog' : 'topology', + 'device' : 'device', + 'endpoint' : 'device', + 'link' : 'link', + 'service' : 'service', + 'slice' : 'slice', + 'policyrule': 'policyrule', + 'connection': 'connection', + } +} + +def get_subsystem(component : str, rpc_method : str) -> Optional[str]: + return next(iter([ + subsystem + for pattern,subsystem in SUBSYSTEMS_MAPPING.get(component, {}).items() + if pattern in rpc_method + ]), None) + +def update_keys(component : str, rpc_method : str) -> Tuple[Tuple, Tuple]: + subsystem = get_subsystem(component, rpc_method) + collection_keys = (component, subsystem) + histogram_keys = (rpc_method,) + return collection_keys, histogram_keys + +def get_plot_specs(folders : Dict[str, str], component : str, subsystem : Optional[str]) -> Tuple[str, str]: + if subsystem is None: + title = '{:s} - RPC Methods [{:s}]'.format(component.title(), EXPERIMENT_NAME) + filepath = '{:s}/{:s}.png'.format(folders['png'], component) + else: + title = '{:s} - RPC Methods - {:s} [{:s}]'.format(component.title(), subsystem.title(), EXPERIMENT_NAME) + filepath = '{:s}/{:s}-{:s}.png'.format(folders['png'], component, subsystem) + return title, filepath + +##### AUTOMATED CODE ################################################################################################### + +def get_series_names(folders : Dict[str, str]) -> List[str]: + series_names = get_prometheus_series_names( + PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END, + raw_json_filepath='{:s}/_series.json'.format(folders['json']) + ) + return series_names + +def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]: + m = RE_SERIES_NAME.match(series_name) + if m is None: + # pylint: disable=broad-exception-raised + raise Exception('Unparsable series name: {:s}'.format(str(series_name))) + extra_labels = m.groups() + results = get_prometheus_range( + PROM_ADDRESS, 
PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP, + raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name) + ) + histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels) + unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False) + save_histograms(histograms, folders['csv']) + return histograms + +def main() -> None: + histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict() + + folders = create_folders(OUT_FOLDER, EXPERIMENT_ID) + series_names = get_series_names(folders) + + for series_name in series_names: + histograms = get_histogram_data(series_name, folders) + for histogram_keys, histogram_data in histograms.items(): + collection_keys,histogram_keys = update_keys(*histogram_keys) + histograms = histograms_collection.setdefault(collection_keys, dict()) + histograms[histogram_keys] = histogram_data + + for histogram_keys,histograms in histograms_collection.items(): + title, filepath = get_plot_specs(folders, *histogram_keys) + plot_histogram(histograms, filepath, title=title) + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/perf_plots/Device_Driver_Details.py b/src/tests/tools/perf_plots/Device_Driver_Details.py new file mode 100644 index 0000000000000000000000000000000000000000..24b287cc826872f48acc8c24c4f51ecd7ba8c676 --- /dev/null +++ b/src/tests/tools/perf_plots/Device_Driver_Details.py @@ -0,0 +1,101 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime, re +from typing import Dict, List, Optional, Tuple +from .tools.FileSystem import create_folders +from .tools.HistogramData import HistogramData +from .tools.Plotter import plot_histogram +from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names +from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms + +##### EXPERIMENT SETTINGS ############################################################################################## + +EXPERIMENT_NAME = 'L2VPN with Emulated' +EXPERIMENT_ID = 'l2vpn-emu' +TIME_START = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc) +TIME_END = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc) +TIME_STEP = '1m' +LABEL_FILTERS = { + #'driver': 'emulated', + #'operation': 'configure_device', # add_device / configure_device + #'step': 'get_device', +} + +##### ENVIRONMENT SETTINGS ############################################################################################# + +PROM_ADDRESS = '127.0.0.1' +PROM_PORT = 9090 +OUT_FOLDER = 'data/perf/' + +##### PLOT-SPECIFIC CUSTOMIZATIONS ##################################################################################### + +EXPERIMENT_ID += '/dev-drv-details' +SERIES_MATCH = 'tfs_device_execution_details_histogram_duration_bucket' +RE_SERIES_NAME = re.compile(r'^tfs_device_execution_details_histogram_duration_bucket$') +SERIES_LABELS = ['driver', 'operation', 'step'] + +def update_keys(driver : str, operation : str, step : str) -> Tuple[Tuple, Tuple]: + collection_keys = (driver, operation) + histogram_keys = (step,) + return collection_keys, histogram_keys + +def get_plot_specs(folders : Dict[str, str], driver : str, operation : str) -> Tuple[str, str]: + title = 'Device Driver - {:s} - {:s}'.format(driver.title(), operation.replace('_', '').title()) + filepath = 
'{:s}/{:s}-{:s}.png'.format(folders['png'], driver, operation) + return title, filepath + +##### AUTOMATED CODE ################################################################################################### + +def get_series_names(folders : Dict[str, str]) -> List[str]: + series_names = get_prometheus_series_names( + PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END, + raw_json_filepath='{:s}/_series.json'.format(folders['json']) + ) + return series_names + +def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]: + m = RE_SERIES_NAME.match(series_name) + if m is None: + # pylint: disable=broad-exception-raised + raise Exception('Unparsable series name: {:s}'.format(str(series_name))) + extra_labels = m.groups() + results = get_prometheus_range( + PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP, + raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name) + ) + histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels) + unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False) + save_histograms(histograms, folders['csv']) + return histograms + +def main() -> None: + histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict() + + folders = create_folders(OUT_FOLDER, EXPERIMENT_ID) + series_names = get_series_names(folders) + + for series_name in series_names: + histograms = get_histogram_data(series_name, folders) + for histogram_keys, histogram_data in histograms.items(): + collection_keys,histogram_keys = update_keys(*histogram_keys) + histograms = histograms_collection.setdefault(collection_keys, dict()) + histograms[histogram_keys] = histogram_data + + for histogram_keys,histograms in histograms_collection.items(): + title, filepath = get_plot_specs(folders, *histogram_keys) + plot_histogram(histograms, filepath, title=title) + +if __name__ == '__main__': + main() diff --git 
a/src/tests/tools/perf_plots/Device_Driver_Methods.py b/src/tests/tools/perf_plots/Device_Driver_Methods.py new file mode 100644 index 0000000000000000000000000000000000000000..a92bd13747f6c7c6aa861c989e9f1199ef3870d0 --- /dev/null +++ b/src/tests/tools/perf_plots/Device_Driver_Methods.py @@ -0,0 +1,99 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime, re +from typing import Dict, List, Tuple +from .tools.FileSystem import create_folders +from .tools.HistogramData import HistogramData +from .tools.Plotter import plot_histogram +from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names +from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms + +##### EXPERIMENT SETTINGS ############################################################################################## + +EXPERIMENT_NAME = 'L2VPN with Emulated' +EXPERIMENT_ID = 'l2vpn-emu' +TIME_START = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc) +TIME_END = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc) +TIME_STEP = '1m' +LABEL_FILTERS = { + #'driver': 'emulated', +} + +##### ENVIRONMENT SETTINGS ############################################################################################# + +PROM_ADDRESS = '127.0.0.1' +PROM_PORT = 9090 +OUT_FOLDER = 'data/perf/' + +##### PLOT-SPECIFIC CUSTOMIZATIONS 
##################################################################################### + +EXPERIMENT_ID += '/dev-drv-methods' +SERIES_MATCH = 'tfs_device_driver_.+_histogram_duration_bucket' +RE_SERIES_NAME = re.compile(r'^tfs_device_driver_(.+)_histogram_duration_bucket$') +SERIES_LABELS = ['driver'] + +def update_keys(driver : str, method : str) -> Tuple[Tuple, Tuple]: + collection_keys = (driver,) + histogram_keys = (method,) + return collection_keys, histogram_keys + +def get_plot_specs(folders : Dict[str, str], driver : str) -> Tuple[str, str]: + title = 'Device Driver - {:s}'.format(driver.title()) + filepath = '{:s}/{:s}.png'.format(folders['png'], driver) + return title, filepath + +##### AUTOMATED CODE ################################################################################################### + +def get_series_names(folders : Dict[str, str]) -> List[str]: + series_names = get_prometheus_series_names( + PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END, + raw_json_filepath='{:s}/_series.json'.format(folders['json']) + ) + return series_names + +def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]: + m = RE_SERIES_NAME.match(series_name) + if m is None: + # pylint: disable=broad-exception-raised + raise Exception('Unparsable series name: {:s}'.format(str(series_name))) + extra_labels = m.groups() + results = get_prometheus_range( + PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP, + raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name) + ) + histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels) + unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False) + save_histograms(histograms, folders['csv']) + return histograms + +def main() -> None: + histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict() + + folders = create_folders(OUT_FOLDER, EXPERIMENT_ID) + series_names = 
get_series_names(folders) + + for series_name in series_names: + histograms = get_histogram_data(series_name, folders) + for histogram_keys, histogram_data in histograms.items(): + collection_keys,histogram_keys = update_keys(*histogram_keys) + histograms = histograms_collection.setdefault(collection_keys, dict()) + histograms[histogram_keys] = histogram_data + + for histogram_keys,histograms in histograms_collection.items(): + title, filepath = get_plot_specs(folders, *histogram_keys) + plot_histogram(histograms, filepath, title=title) + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/perf_plots/README.md b/src/tests/tools/perf_plots/README.md new file mode 100644 index 0000000000000000000000000000000000000000..14dcb1c9508a1b63c62ae84aef4abe3e17589ef7 --- /dev/null +++ b/src/tests/tools/perf_plots/README.md @@ -0,0 +1,29 @@ +# Tool: Perf Plots Generator: + +Simple tool to gather performance data from Prometheus and produce histogram plots. + +## Example: + +- Ensure your MicroK8s includes the monitoring addon and your deployment specs the service monitors. + +- Deploy TeraFlowSDN controller with your specific settings: +```(bash) +cd ~/tfs-ctrl +source my_deploy.sh +./deploy.sh +``` + +- Execute the test you want to meter. 
+ +- Select the appropriate script: + - Device_Driver_Methods : To report Device Driver Methods + - Device_Driver_Details : To report Device Add/Configure Details + - Service_Handler_Methods : To report Service Handler Methods + - Component_RPC_Methods : To report Component RPC Methods + +- Tune the experiment settings + +- Execute the report script: +```(bash) +PYTHONPATH=./src python -m tests.tools.perf_plots.<script> +``` diff --git a/src/tests/tools/perf_plots/Service_Handler_Methods.py b/src/tests/tools/perf_plots/Service_Handler_Methods.py new file mode 100644 index 0000000000000000000000000000000000000000..a57757274d518450672ccec5a08ef6afb4c527be --- /dev/null +++ b/src/tests/tools/perf_plots/Service_Handler_Methods.py @@ -0,0 +1,99 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime, re +from typing import Dict, List, Tuple +from .tools.FileSystem import create_folders +from .tools.HistogramData import HistogramData +from .tools.Plotter import plot_histogram +from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names +from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms + +##### EXPERIMENT SETTINGS ############################################################################################## + +EXPERIMENT_NAME = 'L2VPN with Emulated' +EXPERIMENT_ID = 'l2vpn-emu' +TIME_START = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc) +TIME_END = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc) +TIME_STEP = '1m' +LABEL_FILTERS = { + #'handler': 'l2nm_emulated', +} + +##### ENVIRONMENT SETTINGS ############################################################################################# + +PROM_ADDRESS = '127.0.0.1' +PROM_PORT = 9090 +OUT_FOLDER = 'data/perf/' + +##### PLOT-SPECIFIC CUSTOMIZATIONS ##################################################################################### + +EXPERIMENT_ID += '/svc-hdl-methods' +SERIES_MATCH = 'tfs_service_handler_.+_histogram_duration_bucket' +RE_SERIES_NAME = re.compile(r'^tfs_service_handler_(.+)_histogram_duration_bucket$') +SERIES_LABELS = ['handler'] + +def update_keys(handler : str, method : str) -> Tuple[Tuple, Tuple]: + collection_keys = (handler,) + histogram_keys = (method,) + return collection_keys, histogram_keys + +def get_plot_specs(folders : Dict[str, str], handler : str) -> Tuple[str, str]: + title = 'Service Handler - {:s}'.format(handler.title()) + filepath = '{:s}/{:s}.png'.format(folders['png'], handler) + return title, filepath + +##### AUTOMATED CODE ################################################################################################### + +def get_series_names(folders : Dict[str, str]) -> List[str]: + series_names = get_prometheus_series_names( + 
PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END, + raw_json_filepath='{:s}/_series.json'.format(folders['json']) + ) + return series_names + +def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]: + m = RE_SERIES_NAME.match(series_name) + if m is None: + # pylint: disable=broad-exception-raised + raise Exception('Unparsable series name: {:s}'.format(str(series_name))) + extra_labels = m.groups() + results = get_prometheus_range( + PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP, + raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name) + ) + histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels) + unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False) + save_histograms(histograms, folders['csv']) + return histograms + +def main() -> None: + histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict() + + folders = create_folders(OUT_FOLDER, EXPERIMENT_ID) + series_names = get_series_names(folders) + + for series_name in series_names: + histograms = get_histogram_data(series_name, folders) + for histogram_keys, histogram_data in histograms.items(): + collection_keys,histogram_keys = update_keys(*histogram_keys) + histograms = histograms_collection.setdefault(collection_keys, dict()) + histograms[histogram_keys] = histogram_data + + for histogram_keys,histograms in histograms_collection.items(): + title, filepath = get_plot_specs(folders, *histogram_keys) + plot_histogram(histograms, filepath, title=title) + +if __name__ == '__main__': + main() diff --git a/src/webui/service/policy/__init__.py b/src/tests/tools/perf_plots/__init__.py similarity index 100% rename from src/webui/service/policy/__init__.py rename to src/tests/tools/perf_plots/__init__.py diff --git a/src/tests/tools/perf_plots/tools/FileSystem.py b/src/tests/tools/perf_plots/tools/FileSystem.py new file mode 100644 index 
0000000000000000000000000000000000000000..3af5dbc910be35b548a11d7a00ee79e604aa0927 --- /dev/null +++ b/src/tests/tools/perf_plots/tools/FileSystem.py @@ -0,0 +1,27 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pathlib +from typing import Dict + +def create_folders(root_folder : str, experiment_id : str) -> Dict[str, str]: + experiment_folder = root_folder + '/' + experiment_id + folders = { + 'csv' : experiment_folder + '/csv' , + 'json' : experiment_folder + '/json', + 'png' : experiment_folder + '/png' , + } + for folder in folders.values(): + pathlib.Path(folder).mkdir(parents=True, exist_ok=True) + return folders diff --git a/src/tests/tools/perf_plots/tools/Histogram.py b/src/tests/tools/perf_plots/tools/Histogram.py new file mode 100644 index 0000000000000000000000000000000000000000..0380b5bd21804cfc468a1f9bd19565337f76f741 --- /dev/null +++ b/src/tests/tools/perf_plots/tools/Histogram.py @@ -0,0 +1,88 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
def results_to_histograms(
    results : List[Dict], key_labels : List[str], extra_labels : List[str] = []
) -> Dict[Tuple, HistogramData]:
    """Group Prometheus range-query results into per-label-set histograms.

    Each result's ``metric`` dict is reduced to the values of ``key_labels``
    (with ``extra_labels`` appended verbatim, when given) to form the
    histogram key. The ``le`` label provides the bucket upper bound; every
    ``(timestamp, count)`` sample is recorded under that bucket.

    NOTE: ``extra_labels`` defaults to a shared list but is strictly
    read-only here, so the shared default is harmless.
    """
    histograms : Dict[Tuple, HistogramData] = dict()
    for result in results:
        metric : Dict = result['metric']
        labels = [metric[l] for l in key_labels]
        if extra_labels: labels.extend(extra_labels)
        histogram_key = tuple(labels)
        # setdefault creates the histogram on first sight and returns the
        # existing one afterwards (single lookup instead of get+setdefault).
        histogram = histograms.setdefault(
            histogram_key, HistogramData(timestamps=set(), bins=set(), data=dict()))
        bin_ = float(metric['le'])
        histogram.bins.add(bin_)

        values : List[Tuple[int, str]] = result['values']
        for timestamp,count in values:
            histogram.timestamps.add(timestamp)
            histogram.data.setdefault(timestamp, dict())[bin_] = int(count)
    return histograms

def unaccumulate_histogram(
    histogram : HistogramData, process_bins : bool = True, process_timestamps : bool = True
) -> None:
    """Convert cumulative bucket counts into per-bucket/per-interval counts, in place.

    Prometheus histogram buckets are cumulative over ``le`` and accumulate
    over time for counter metrics. With ``process_bins`` each bucket has the
    lower buckets' total subtracted; with ``process_timestamps`` each
    timestamp has the running total of previous intervals subtracted.

    Assumes every timestamp present in ``histogram.data`` carries a count
    for every bucket in ``histogram.bins`` (as built by
    results_to_histograms for a complete matrix result).
    """
    timestamps = sorted(histogram.timestamps)
    bins = sorted(histogram.bins)
    accumulated_over_time = {b:0 for b in bins}
    for timestamp in timestamps:
        bin_to_count = histogram.data.get(timestamp)
        if bin_to_count is None: continue  # timestamp known, but no samples recorded

        accumulated_over_bins = 0
        for bin_ in bins:
            count = bin_to_count[bin_]

            if process_bins:
                count -= accumulated_over_bins   # remove the lower buckets' share
                accumulated_over_bins += count   # running cumulative-over-bins total

            if process_timestamps:
                count -= accumulated_over_time[bin_]  # remove previous intervals' share
                accumulated_over_time[bin_] += count  # running cumulative-over-time total

            bin_to_count[bin_] = count

def unaccumulate_histograms(
    histograms : Dict[Tuple, HistogramData], process_bins : bool = True, process_timestamps : bool = True
) -> None:
    """Apply unaccumulate_histogram() to every histogram, in place."""
    for histogram in histograms.values():
        unaccumulate_histogram(histogram, process_bins=process_bins, process_timestamps=process_timestamps)

def save_histogram(filepath : str, histogram : HistogramData) -> None:
    """Write one histogram as CSV: one row per timestamp, one column per bucket.

    The header row holds the bucket upper bounds (first cell empty); missing
    (timestamp, bucket) entries are written as 0.
    """
    timestamps = sorted(histogram.timestamps)
    bins = sorted(histogram.bins)
    header = [''] + [str(b) for b in bins]
    # newline='' is required by the csv module so csv.writer controls the
    # line terminators itself (avoids blank rows on '\r\n' platforms).
    with open(filepath, 'w', encoding='UTF-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for timestamp in timestamps:
            bin_to_count = histogram.data.get(timestamp, {})
            writer.writerow([timestamp] + [
                str(bin_to_count.get(bin_, 0))
                for bin_ in bins
            ])

def save_histograms(histograms : Dict[Tuple, HistogramData], data_folder : str) -> None:
    """Save every histogram as '<data_folder>/<key1>__<key2>__....csv'."""
    for histogram_keys, histogram_data in histograms.items():
        filepath = '{:s}/{:s}.csv'.format(data_folder, '__'.join(histogram_keys))
        save_histogram(filepath, histogram_data)
from dataclasses import dataclass
from typing import Dict, Set

@dataclass
class HistogramData:
    """Mutable container for one (possibly cumulative) Prometheus histogram series."""
    timestamps : Set[int]                # sample timestamps observed for this series
    bins : Set[float]                    # bucket upper bounds (parsed from the 'le' label)
    data : Dict[int, Dict[float, int]]   # timestamp -> (bucket upper bound -> count)
def plot_histogram(
    histograms : Dict[Tuple, HistogramData], filepath : str,
    title : Optional[str] = None, label_separator : str = ' ', dpi : int = 600,
    legend_loc : str = 'best', grid : bool = True
) -> None:
    """Plot the cumulative distribution of one or more histograms and save it.

    For each histogram only the snapshot at its latest timestamp is drawn,
    as a normalized cumulative step curve over the bucket upper bounds, with
    the x axis in log-scaled seconds. Histograms whose latest snapshot is
    missing or all zeros are skipped; if nothing remains, no file is written.

    :param histograms: histogram data indexed by label tuples; each label
        tuple is joined with ``label_separator`` to form the legend entry.
    :param filepath: destination image file (format inferred by matplotlib).
    """

    # plot the cumulative histogram
    _, ax = plt.subplots(figsize=(8, 8))

    num_series = 0
    for histogram_keys, histogram_data in histograms.items():
        bins = sorted(histogram_data.bins)

        last_timestamp = max(histogram_data.timestamps)
        bin_to_count = histogram_data.data.get(last_timestamp)
        if bin_to_count is None: continue  # no samples recorded at the latest timestamp
        # A bucket may be absent at this timestamp when the 'le' series have
        # uneven timestamp coverage; treat a missing bucket as a zero count
        # instead of raising KeyError.
        counts = [int(bin_to_count.get(bin_, 0)) for bin_ in bins]
        if sum(counts) == 0: continue
        num_series += 1

        # Prepend a 0 lower edge so 'bins' forms len(counts)+1 bucket edges.
        bins.insert(0, 0)
        bins = np.array(bins).astype(float)
        counts = np.array(counts).astype(float)

        assert len(bins) == len(counts) + 1
        centroids = (bins[1:] + bins[:-1]) / 2

        label = label_separator.join(histogram_keys)
        ax.hist(centroids, bins=bins, weights=counts, range=(min(bins), max(bins)), density=True,
            histtype='step', cumulative=True, label=label)

    if num_series == 0: return  # nothing plottable; do not emit an empty figure file

    ax.grid(grid)
    ax.legend(loc=legend_loc)
    if title is not None: ax.set_title(title)
    ax.set_xlabel('seconds')
    ax.set_ylabel('Likelihood of occurrence')
    plt.xscale('log')
    plt.savefig(filepath, dpi=dpi)
    plt.show()
def get_prometheus_series_names(
    address : str, port : int, metric_match : str, time_start : datetime, time_end : datetime, timeout : int = 10,
    raw_json_filepath : Optional[str] = None
) -> List[str]:
    """Return the metric names matching ``metric_match`` within a time window.

    Queries the Prometheus ``/api/v1/label/__name__/values`` endpoint.
    ``metric_match`` is a Prometheus regular expression matched against the
    ``__name__`` label. When ``raw_json_filepath`` is given, the raw JSON
    response is also dumped to that file for offline re-processing.

    :raises AssertionError: if Prometheus reports a non-success status.
    """
    str_url = 'http://{:s}:{:d}/api/v1/label/__name__/values'.format(address, port)
    params = {
        'match[]': '{{__name__=~"{:s}"}}'.format(metric_match),
        # datetime.timestamp() honors tzinfo on aware datetimes (falling back
        # to local time for naive ones), unlike time.mktime() which always
        # assumes local time and truncates sub-second precision.
        'start': time_start.timestamp(),
        'end'  : time_end.timestamp(),
    }
    response = requests.get(str_url, params=params, timeout=timeout)
    results = response.json()
    if raw_json_filepath is not None:
        with open(raw_json_filepath, 'w', encoding='UTF-8') as f:
            f.write(json.dumps(results, sort_keys=True))
    assert results['status'] == 'success'
    return results['data']

def get_prometheus_range(
    address : str, port : int, metric_name : str, labels : Dict[str, str], time_start : datetime, time_end : datetime,
    time_step : str, timeout : int = 10, raw_json_filepath : Optional[str] = None
) -> List[Dict]:
    """Run a Prometheus range query and return the resulting matrix series.

    Builds the instant-vector selector ``metric_name{label="value", ...}``
    and queries ``/api/v1/query_range`` between ``time_start`` and
    ``time_end`` with resolution ``time_step``. When ``raw_json_filepath``
    is given, the raw JSON response is also dumped to that file.

    :returns: the ``result`` list of the matrix response; each entry holds a
        ``metric`` label dict and a ``values`` list of (timestamp, value).
    :raises AssertionError: if the status is not success or the result type
        is not 'matrix'.
    """
    str_url = 'http://{:s}:{:d}/api/v1/query_range'.format(address, port)
    str_query = metric_name
    if len(labels) > 0:
        str_labels = ', '.join(['{:s}="{:s}"'.format(name, value) for name,value in labels.items()])
        str_query += '{{{:s}}}'.format(str_labels)
    params = {
        'query': str_query,
        # See the note in get_prometheus_series_names() about timestamp()
        # vs time.mktime().
        'start': time_start.timestamp(),
        'end'  : time_end.timestamp(),
        'step' : time_step,
    }
    response = requests.get(str_url, params=params, timeout=timeout)
    results = response.json()
    if raw_json_filepath is not None:
        with open(raw_json_filepath, 'w', encoding='UTF-8') as f:
            f.write(json.dumps(results, sort_keys=True))
    assert results['status'] == 'success'
    assert results['data']['resultType'] == 'matrix'
    return results['data']['result']
+ diff --git a/src/webui/grafana_prom_device_exec_details.json b/src/webui/grafana_prom_device_exec_details.json new file mode 100644 index 0000000000000000000000000000000000000000..a18c2b91cabd942dd5908ff3e6b3966528e62fa0 --- /dev/null +++ b/src/webui/grafana_prom_device_exec_details.json @@ -0,0 +1,258 @@ +{"overwrite": true, "folderId": 0, "dashboard": + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "iteration": 1683036452435, + "links": [], + "liveNow": false, + "panels": [ + { + "cards": {}, + "color": { + "cardColor": "#b4ff00", + "colorScale": "linear", + "colorScheme": "interpolateRdYlGn", + "exponent": 0.5, + "min": 0, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 22, + "w": 24, + "x": 0, + "y": 0 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 2, + "interval": "60s", + "legend": { + "show": true + }, + "pluginVersion": "7.5.4", + "reverseYBuckets": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(\r\n max_over_time(tfs_device_execution_details_histogram_duration_bucket{driver=~\"[[driver]]\", operation=~\"[[operation]]\", step=~\"[[step]]\"}[1m]) -\r\n min_over_time(tfs_device_execution_details_histogram_duration_bucket{driver=~\"[[driver]]\", operation=~\"[[operation]]\", step=~\"[[step]]\"}[1m])\r\n) by (le)", + "format": "heatmap", + "instant": false, + "interval": "1m", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "range": 
true, + "refId": "A" + } + ], + "title": "Histogram", + "tooltip": { + "show": true, + "showHistogram": true + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "yAxis": { + "format": "s", + "logBase": 1, + "show": true + }, + "yBucketBound": "auto" + } + ], + "refresh": "5s", + "schemaVersion": 36, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": ".*", + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(tfs_device_execution_details_histogram_duration_bucket, operation)", + "hide": 0, + "includeAll": true, + "label": "Operation", + "multi": true, + "name": "operation", + "options": [], + "query": { + "query": "label_values(tfs_device_execution_details_histogram_duration_bucket, operation)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(tfs_device_execution_details_histogram_duration_bucket, driver)", + "hide": 0, + "includeAll": true, + "label": "Driver", + "multi": true, + "name": "driver", + "options": [], + "query": { + "query": "label_values(tfs_device_execution_details_histogram_duration_bucket, driver)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": 
"label_values(tfs_device_execution_details_histogram_duration_bucket, step)", + "hide": 0, + "includeAll": true, + "label": "Step", + "multi": true, + "name": "step", + "options": [], + "query": { + "query": "label_values(tfs_device_execution_details_histogram_duration_bucket, step)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(tfs_device_execution_details_histogram_duration_bucket, pod)", + "hide": 0, + "includeAll": true, + "label": "Pod", + "multi": true, + "name": "pod", + "options": [], + "query": { + "query": "label_values(tfs_device_execution_details_histogram_duration_bucket, pod)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "TFS / Device Execution Details", + "uid": "tfs-dev-exec", + "version": 4, + "weekStart": "" + } +} diff --git a/src/webui/grafana_prom_device_config_exec_details.json b/src/webui/old/grafana_prom_device_config_exec_details.json similarity index 100% rename from src/webui/grafana_prom_device_config_exec_details.json rename to src/webui/old/grafana_prom_device_config_exec_details.json diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index e7f50ed42d19921b3423617f6860b5630e93adba..3c64f45c90457e1b6a9553e60634879a28910a31 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -95,8 +95,8 @@ def create_app(use_config=None, web_app_root=None): from webui.service.link.routes 
import link # pylint: disable=import-outside-toplevel app.register_blueprint(link) - from webui.service.policy.routes import policy # pylint: disable=import-outside-toplevel - app.register_blueprint(policy) + from webui.service.policy_rule.routes import policy_rule # pylint: disable=import-outside-toplevel + app.register_blueprint(policy_rule) app.jinja_env.globals.update({ # pylint: disable=no-member 'enumerate' : enumerate, diff --git a/src/webui/service/policy_rule/__init__.py b/src/webui/service/policy_rule/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612 --- /dev/null +++ b/src/webui/service/policy_rule/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/webui/service/policy/routes.py b/src/webui/service/policy_rule/routes.py similarity index 62% rename from src/webui/service/policy/routes.py rename to src/webui/service/policy_rule/routes.py index 6d14f86b4f1428695b474b3f2e2dd4dc72657452..5a99cf8b2f6ce71c571c9d31a2118a5390ee7d15 100644 --- a/src/webui/service/policy/routes.py +++ b/src/webui/service/policy_rule/routes.py @@ -18,33 +18,33 @@ from common.proto.context_pb2 import Empty from common.proto.policy_pb2 import PolicyRuleStateEnum from context.client.ContextClient import ContextClient -policy = Blueprint('policy', __name__, url_prefix='/policy') +policy_rule = Blueprint('policy_rule', __name__, url_prefix='/policy_rule') context_client = ContextClient() -@policy.get('/') +@policy_rule.get('/') def home(): context_client.connect() policy_rules = context_client.ListPolicyRules(Empty()) policy_rules = policy_rules.policyRules context_client.close() - return render_template('policy/home.html', policy_rules=policy_rules, prse=PolicyRuleStateEnum) + return render_template('policy_rule/home.html', policy_rules=policy_rules, prse=PolicyRuleStateEnum) -#@policy.get('<path:policy_uuid>/detail') -#def detail(policy_uuid: str): +#@policy_rule.get('<path:policy_rule_uuid>/detail') +#def detail(policy_rule_uuid: str): # try: # context_client.connect() # -# slice_obj = get_slice_by_uuid(context_client, slice_uuid, rw_copy=False) -# if slice_obj is None: -# flash('Context({:s})/Slice({:s}) not found'.format(str(context_uuid), str(slice_uuid)), 'danger') -# slice_obj = Slice() +# policy_rule_obj = get_policy_rule_by_uuid(context_client, policy_rule_uuid, rw_copy=False) +# if policy_rule_obj is None: +# flash('Context({:s})/PolicyRule({:s}) not found'.format(str(context_uuid), str(policy_rule_uuid)), 'danger') +# policy_rule_obj = PolicyRule() # # context_client.close() # # return render_template( -# 'slice/detail.html', slice=slice_obj, prse=PolicyRuleStateEnum) +# 'policy_rule/detail.html', 
policy_rule=policy_rule_obj, prse=PolicyRuleStateEnum) # except Exception as e: -# flash('The system encountered an error and cannot show the details of this slice.', 'warning') +# flash('The system encountered an error and cannot show the details of this policy_rule.', 'warning') # current_app.logger.exception(e) -# return redirect(url_for('slice.home')) +# return redirect(url_for('policy_rule.home')) diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html index 61c283b0d957b4d13b7cc57e47d3ea2675ab76f0..4c31b61935aca2bd7d2a5e7642168afdea6fd02d 100644 --- a/src/webui/service/templates/base.html +++ b/src/webui/service/templates/base.html @@ -84,10 +84,10 @@ {% endif %} </li> <li class="nav-item"> - {% if '/policy/' in request.path %} - <a class="nav-link active" aria-current="page" href="{{ url_for('policy.home') }}">Policy</a> + {% if '/policy_rule/' in request.path %} + <a class="nav-link active" aria-current="page" href="{{ url_for('policy_rule.home') }}">Policy Rules</a> {% else %} - <a class="nav-link" href="{{ url_for('policy.home') }}">Policy</a> + <a class="nav-link" href="{{ url_for('policy_rule.home') }}">Policy Rules</a> {% endif %} </li> <li class="nav-item"> @@ -177,4 +177,4 @@ <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.2/dist/js/bootstrap.min.js" integrity="sha384-PsUw7Xwds7x08Ew3exXhqzbhuEYmA2xnwc8BuD6SEr+UmEHlX8/MCltYEodzWA4u" crossorigin="anonymous"></script> --> </body> - </html> \ No newline at end of file + </html> diff --git a/src/webui/service/templates/policy/home.html b/src/webui/service/templates/policy/home.html deleted file mode 100644 index 081a7f0b5291346633a2f682ba4552b5c1e362fb..0000000000000000000000000000000000000000 --- a/src/webui/service/templates/policy/home.html +++ /dev/null @@ -1,84 +0,0 @@ -<!-- - Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in 
compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. ---> - -{% extends 'base.html' %} - -{% block content %} - <h1>Policy</h1> - - <div class="row"> - <div class="col"> - {{ policies | length }} policies found in context <i>{{ session['context_uuid'] }}</i> - </div> - </div> - - <table class="table table-striped table-hover"> - <thead> - <tr> - <th scope="col">UUID</th> - <th scope="col">Kind</th> - <th scope="col">Priority</th> - <th scope="col">Condition</th> - <th scope="col">Operator</th> - <th scope="col">Action</th> - <th scope="col">Service</th> - <th scope="col">Devices</th> - <th scope="col">State</th> - <th scope="col">Message</th> - <th scope="col">Extra</th> - <th scope="col"></th> - </tr> - </thead> - <tbody> - {% if policies %} - {% for policy in policies %} - {% if policy.WhichOneof('policy_rule') == 'device' %} - <tr> - <td>{{ policy.device.policyRuleBasic.policyRuleId.uuid }}</td> - <td>{{ policy.WhichOneof('policy_rule') }}</td> - <td>{{ policy.device.policyRuleBasic.priority }}</td> - <td>{{ policy.device.policyRuleBasic.conditionList }}</td> - <td>{{ policy.device.policyRuleBasic.booleanOperator }}</td> - <td>{{ policy.device.policyRuleBasic.actionList }}</td> - <td>-</td> - <td>{{ policy.device.deviceList }}</td> - <td>{{ prse.Name(policy.device.policyRuleBasic.policyRuleState.policyRuleState).replace('POLICY_', '') }}</td> - <td>{{ policy.device.policyRuleBasic.policyRuleState.policyRuleStateMessage }}</td> - </tr> - {% elif policy.WhichOneof('policy_rule') == 'service' %} - <tr> - <td>{{ policy.service.policyRuleBasic.policyRuleId.uuid }}</td> - <td>{{ 
policy.WhichOneof('policy_rule') }}</td> - <td>{{ policy.service.policyRuleBasic.priority }}</td> - <td>{{ policy.service.policyRuleBasic.conditionList }}</td> - <td>{{ policy.service.policyRuleBasic.booleanOperator }}</td> - <td>{{ policy.service.policyRuleBasic.actionList }}</td> - <td>{{ policy.service.serviceId }}</td> - <td>{{ policy.service.deviceList }}</td> - <td>{{ prse.Name(policy.service.policyRuleBasic.policyRuleState.policyRuleState).replace('POLICY_', '') }}</td> - <td>{{ policy.service.policyRuleBasic.policyRuleState.policyRuleStateMessage }}</td> - </tr> - {% else %} - <tr><td colspan="11">Unsupported policy type {{ policy.WhichOneof('policy_rule') }}</td></tr> - {% endif %} - {% endfor %} - {% else %} - <tr><td colspan="11">No policies found</td></tr> - {% endif %} - </tbody> - </table> - -{% endblock %} diff --git a/src/webui/service/templates/policy_rule/home.html b/src/webui/service/templates/policy_rule/home.html new file mode 100644 index 0000000000000000000000000000000000000000..c63807a6aad046d8312a07bbb412c541c5e06bc8 --- /dev/null +++ b/src/webui/service/templates/policy_rule/home.html @@ -0,0 +1,84 @@ +<!-- + Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> + +{% extends 'base.html' %} + +{% block content %} + <h1>Policy Rules</h1> + + <div class="row"> + <div class="col"> + {{ policy_rules | length }} policy rules found in context <i>{{ session['context_uuid'] }}</i> + </div> + </div> + + <table class="table table-striped table-hover"> + <thead> + <tr> + <th scope="col">UUID</th> + <th scope="col">Kind</th> + <th scope="col">Priority</th> + <th scope="col">Condition</th> + <th scope="col">Operator</th> + <th scope="col">Action</th> + <th scope="col">Service</th> + <th scope="col">Devices</th> + <th scope="col">State</th> + <th scope="col">Message</th> + <th scope="col">Extra</th> + <th scope="col"></th> + </tr> + </thead> + <tbody> + {% if policy_rules %} + {% for policy_rule in policy_rules %} + {% if policy_rule.WhichOneof('policy_rule') == 'device' %} + <tr> + <td>{{ policy_rule.device.policyRuleBasic.policyRuleId.uuid }}</td> + <td>{{ policy_rule.WhichOneof('policy_rule') }}</td> + <td>{{ policy_rule.device.policyRuleBasic.priority }}</td> + <td>{{ policy_rule.device.policyRuleBasic.conditionList }}</td> + <td>{{ policy_rule.device.policyRuleBasic.booleanOperator }}</td> + <td>{{ policy_rule.device.policyRuleBasic.actionList }}</td> + <td>-</td> + <td>{{ policy_rule.device.deviceList }}</td> + <td>{{ prse.Name(policy_rule.device.policyRuleBasic.policyRuleState.policyRuleState).replace('POLICY_', '') }}</td> + <td>{{ policy_rule.device.policyRuleBasic.policyRuleState.policyRuleStateMessage }}</td> + </tr> + {% elif policy_rule.WhichOneof('policy_rule') == 'service' %} + <tr> + <td>{{ policy_rule.service.policyRuleBasic.policyRuleId.uuid }}</td> + <td>{{ policy_rule.WhichOneof('policy_rule') }}</td> + <td>{{ policy_rule.service.policyRuleBasic.priority }}</td> + <td>{{ policy_rule.service.policyRuleBasic.conditionList }}</td> + <td>{{ policy_rule.service.policyRuleBasic.booleanOperator }}</td> + <td>{{ policy_rule.service.policyRuleBasic.actionList }}</td> + <td>{{ policy_rule.service.serviceId }}</td> + 
<td>{{ policy_rule.service.deviceList }}</td> + <td>{{ prse.Name(policy_rule.service.policyRuleBasic.policyRuleState.policyRuleState).replace('POLICY_', '') }}</td> + <td>{{ policy_rule.service.policyRuleBasic.policyRuleState.policyRuleStateMessage }}</td> + </tr> + {% else %} + <tr><td colspan="11">Unsupported policy rule type {{ policy_rule.WhichOneof('policy_rule') }}</td></tr> + {% endif %} + {% endfor %} + {% else %} + <tr><td colspan="11">No policy rule found</td></tr> + {% endif %} + </tbody> + </table> + +{% endblock %}