diff --git a/src/compute/tests/test_slice.py b/src/compute/tests/test_slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..61f286eb74a876fa02546fc2bf1dcd8f092e718a
--- /dev/null
+++ b/src/compute/tests/test_slice.py
@@ -0,0 +1,125 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, random, uuid
+from typing import Dict, Tuple
+from compute.service.rest_server.nbi_plugins.ietf_network_slice.bindings.network_slice_services import (
+    NetworkSliceServices
+)
+
+# R1 emulated devices
+# Port 13-0 is Optical
+# Port 13-1 is Copper
+R1_UUID = "ed2388eb-5fb9-5888-a4f4-160267d3e19b"
+R1_PORT_13_0_UUID_OPTICAL = "20440915-1a6c-5e7b-a80f-b0e0e51f066d"
+R1_PORT_13_1_UUID_COPPER = "ff900d5d-2ac0-576c-9628-a2d016681f9d"
+
+# R2 emulated devices
+# Port 13-0 is Optical
+# Port 13-1 is Copper
+R2_UUID = "49ce0312-1274-523b-97b8-24d0eca2d72d"
+R2_PORT_13_0_UUID_OPTICAL = "214618cb-b63b-5e66-84c2-45c1c016e5f0"
+R2_PORT_13_1_UUID_COPPER = "4e0f7fb4-5d22-56ad-a00e-20bffb4860f9"
+
+# R3 emulated devices
+# Port 13-0 is Optical
+# Port 13-1 is Copper
+R3_UUID = "3bc8e994-a3b9-5f60-9c77-6608b1d08313"
+R3_PORT_13_0_UUID_OPTICAL = "da5196f5-d651-5def-ada6-50ed6430279d"
+R3_PORT_13_1_UUID_COPPER = "43d221fa-5701-5740-a129-502131f5bda2"
+
+# R4 emulated devices
+# Port 13-0 is Optical
+# Port 13-1 is Copper
+R4_UUID = "b43e6361-2573-509d-9a88-1793e751b10d"
+R4_PORT_13_0_UUID_OPTICAL = "241b74a7-8677-595c-ad65-cc9093c1e341"
+R4_PORT_13_1_UUID_COPPER = "c57abf46-caaf-5954-90cc-1fec0a69330e"
+
+node_dict = {R1_PORT_13_1_UUID_COPPER: R1_UUID,
+             R2_PORT_13_1_UUID_COPPER: R2_UUID,
+             R3_PORT_13_1_UUID_COPPER: R3_UUID,
+             R4_PORT_13_1_UUID_COPPER: R4_UUID}
+list_endpoints = [R1_PORT_13_1_UUID_COPPER,
+                  R2_PORT_13_1_UUID_COPPER,
+                  R3_PORT_13_1_UUID_COPPER,
+                  R4_PORT_13_1_UUID_COPPER]
+
+list_availability= [99, 99.9, 99.99, 99.999, 99.9999]
+list_bw = [10, 40, 50, 100, 150, 200, 400]
+list_owner = ["Telefonica", "CTTC", "Telenor", "ADVA", "Ubitech", "ATOS"]
+
+URL_POST = "/restconf/data/ietf-network-slice-service:ietf-nss/network-slice-services"
+URL_DELETE = "/restconf/data/ietf-network-slice-service:ietf-nss/network-slice-services/slice-service="
+
+def generate_request(seed: str) -> Tuple[Dict, str]:
+
+    ns = NetworkSliceServices()
+
+    # Slice 1
+    suuid = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(seed)))
+    slice1 = ns.slice_service[suuid]
+    slice1.service_description = "Test slice for OFC 2023 demo"
+    slice1.status().admin_status().status = "Planned"  # TODO not yet mapped
+
+    # SDPs: pick two distinct copper endpoints at random
+    sdps1 = slice1.sdps().sdp
+    while True:
+        ep1_uuid = random.choice(list_endpoints)
+        ep2_uuid = random.choice(list_endpoints)
+        if ep1_uuid != ep2_uuid:
+            break
+
+    sdps1[ep1_uuid].node_id = node_dict.get(ep1_uuid)
+    sdps1[ep2_uuid].node_id = node_dict.get(ep2_uuid)
+
+    # Connectivity group: connection construct and 2 SLA constraints:
+    #   - Bandwidth
+    #   - Availability
+    cg_uuid = str(uuid.uuid4())
+    cg = slice1.connection_groups().connection_group
+    cg1 = cg[cg_uuid]
+
+    cc1 = cg1.connectivity_construct[0]
+    cc1.cc_id = 5
+    p2p = cc1.connectivity_construct_type.p2p()
+    p2p.p2p_sender_sdp = ep1_uuid
+    p2p.p2p_receiver_sdp = ep2_uuid
+
+    slo_custom = cc1.slo_sle_policy.custom()
+    metric_bounds = slo_custom.service_slo_sle_policy().metric_bounds().metric_bound
+
+    # SLO Bandwidth
+    slo_bandwidth = metric_bounds["service-slo-two-way-bandwidth"]
+    slo_bandwidth.value_description = "Guaranteed bandwidth"
+    slo_bandwidth.bound = int(random.choice(list_bw))
+    slo_bandwidth.metric_unit = "Gbps"
+
+    # SLO Availability
+    slo_availability = metric_bounds["service-slo-availability"]
+    slo_availability.value_description = "Guaranteed availability"
+    slo_availability.metric_unit = "percentage"
+    slo_availability.bound = random.choice(list_availability)
+
+    json_request = {"data": ns.to_json()}
+
+    # Last, add the owner service-tag manually (not covered by the bindings)
+    list_name_owner = [{"tag-type": "owner", "value": random.choice(list_owner)}]
+    json_request["data"]["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["service-tags"] = list_name_owner
+
+    return (json_request, suuid)
+
+
+if __name__ == "__main__":
+    request = generate_request(123)
+    print(json.dumps(request[0], sort_keys=True, indent=4))
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
index 974ce6f130e9c81f273f418b0a1440d148fcfb74..fdd400a2110fd4a75d6f9e8cc4820bc943eef423 100644
--- a/src/load_generator/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -39,14 +39,6 @@ ROUTER_ID = {
     'R149': '5.5.5.5',
     'R155': '5.5.5.1',
     'R199': '5.5.5.6',
-
-}
-
-VIRTUAL_CIRCUIT = {
-    'R149': '5.5.5.5',
-    'R155': '5.5.5.1',
-    'R199': '5.5.5.6',
-
 }
 
 class RequestGenerator:
@@ -269,8 +261,8 @@ class RequestGenerator:
 
             src_device_name = self._device_data[src_device_uuid]['name']
             src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
-            src_router_id = ROUTER_ID.get(src_device_name)
             src_router_num = int(re.findall(r'^\D*(\d+)', src_device_name)[0])
+            src_router_id = ROUTER_ID.get(src_device_name)
             if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num)
 
             dst_device_name = self._device_data[dst_device_uuid]['name']
@@ -322,8 +314,8 @@ class RequestGenerator:
 
             src_device_name = self._device_data[src_device_uuid]['name']
             src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
-            src_router_id = ROUTER_ID.get(src_device_name)
             src_router_num = int(re.findall(r'^\D*(\d+)', src_device_name)[0])
+            src_router_id = ROUTER_ID.get(src_device_name)
             if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num)
             src_address_ip = '10.{:d}.{:d}.{:d}'.format(x, y, src_router_num)
 
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
index 53c89cd124cb7d3431b37a50596b0b793cfa83eb..e56d436dd006197497d7774be598a480a134320c 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
@@ -33,12 +33,12 @@ DEVICE_TYPE_TO_DEEPNESS = {
     DeviceTypeEnum.EMULATED_P4_SWITCH.value              : 60,
     DeviceTypeEnum.P4_SWITCH.value                       : 60,
 
-    DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 40,
-    DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value          : 40,
-
     DeviceTypeEnum.EMULATED_XR_CONSTELLATION.value       : 40,
     DeviceTypeEnum.XR_CONSTELLATION.value                : 40,
 
+    DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 30,
+    DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value          : 30,
+
     DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value       : 30,
     DeviceTypeEnum.OPEN_LINE_SYSTEM.value                : 30,
 
diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py
index 96751e83770e1b98df4770cf74bb453f6a0519ef..acda45ce80a62a4a3723744546968e3195799b59 100644
--- a/src/service/service/task_scheduler/TaskExecutor.py
+++ b/src/service/service/task_scheduler/TaskExecutor.py
@@ -12,23 +12,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
+import json, logging
 from enum import Enum
 from typing import TYPE_CHECKING, Any, Dict, Optional, Union
 from common.method_wrappers.ServiceExceptions import NotFoundException
-from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId
+from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceDriverEnum, DeviceId, Service, ServiceId
 from common.tools.context_queries.Connection import get_connection_by_id
 from common.tools.context_queries.Device import get_device
 from common.tools.context_queries.Service import get_service_by_id
+from common.tools.grpc.Tools import grpc_message_list_to_json_string
 from common.tools.object_factory.Device import json_device_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
+from service.service.service_handler_api.Exceptions import (
+    UnsatisfiedFilterException, UnsupportedFilterFieldException, UnsupportedFilterFieldValueException)
 from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory, get_service_handler_class
 from service.service.tools.ObjectKeys import get_connection_key, get_device_key, get_service_key
 
 if TYPE_CHECKING:
     from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 
+LOGGER = logging.getLogger(__name__)
+
 CacheableObject = Union[Connection, Device, Service]
 
 class CacheableObjectType(Enum):
@@ -169,5 +174,21 @@ class TaskExecutor:
         self, connection : Connection, service : Service, **service_handler_settings
     ) -> '_ServiceHandler':
         connection_devices = self.get_devices_from_connection(connection, exclude_managed_by_controller=True)
-        service_handler_class = get_service_handler_class(self._service_handler_factory, service, connection_devices)
-        return service_handler_class(service, self, **service_handler_settings)
+        try:
+            service_handler_class = get_service_handler_class(
+                self._service_handler_factory, service, connection_devices)
+            return service_handler_class(service, self, **service_handler_settings)
+        except (UnsatisfiedFilterException, UnsupportedFilterFieldException, UnsupportedFilterFieldValueException):
+            dict_connection_devices = {
+                cd_data.name : (cd_uuid, cd_data.name, {
+                    (device_driver, DeviceDriverEnum.Name(device_driver))
+                    for device_driver in cd_data.device_drivers
+                })
+                for cd_uuid,cd_data in connection_devices.items()
+            }
+            LOGGER.exception(
+                'Unable to select service handler. service={:s} connection={:s} connection_devices={:s}'.format(
+                    grpc_message_list_to_json_string(service), grpc_message_list_to_json_string(connection),
+                    str(dict_connection_devices)
+                )
+            )
diff --git a/src/tests/tools/perf_plots/Component_RPC_Methods.py b/src/tests/tools/perf_plots/Component_RPC_Methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..7aa3ed304bc7d923a5ba634917fd95c28aea513b
--- /dev/null
+++ b/src/tests/tools/perf_plots/Component_RPC_Methods.py
@@ -0,0 +1,123 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, re
+from typing import Dict, List, Optional, Tuple
+from .tools.FileSystem import create_folders
+from .tools.HistogramData import HistogramData
+from .tools.Plotter import plot_histogram
+from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
+from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms
+
+##### EXPERIMENT SETTINGS ##############################################################################################
+
+EXPERIMENT_NAME = 'L2VPN with Emulated'
+EXPERIMENT_ID   = 'l2vpn-emu'
+TIME_START      = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_END        = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_STEP       = '1m'
+LABEL_FILTERS   = {}
+
+##### ENVIRONMENT SETTINGS #############################################################################################
+
+PROM_ADDRESS = '127.0.0.1'
+PROM_PORT    = 9090
+OUT_FOLDER   = 'data/perf/'
+
+##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
+
+EXPERIMENT_ID  += '/component-rpcs'
+SERIES_MATCH   = 'tfs_.+_rpc_.+_histogram_duration_bucket'
+RE_SERIES_NAME = re.compile(r'^tfs_(.+)_rpc_(.+)_histogram_duration_bucket$')
+SERIES_LABELS  = []
+
+SUBSYSTEMS_MAPPING = {
+    'context': {
+        'context'   : 'context',
+        'topolog'   : 'topology',
+        'device'    : 'device',
+        'endpoint'  : 'device',
+        'link'      : 'link',
+        'service'   : 'service',
+        'slice'     : 'slice',
+        'policyrule': 'policyrule',
+        'connection': 'connection',
+    }
+}
+
+def get_subsystem(component : str, rpc_method : str) -> Optional[str]:
+    return next(iter([
+        subsystem
+        for pattern,subsystem in SUBSYSTEMS_MAPPING.get(component, {}).items()
+        if pattern in rpc_method
+    ]), None)
+
+def update_keys(component : str, rpc_method : str) -> Tuple[Tuple, Tuple]:
+    subsystem = get_subsystem(component, rpc_method)
+    collection_keys = (component, subsystem)
+    histogram_keys = (rpc_method,)
+    return collection_keys, histogram_keys
+
+def get_plot_specs(folders : Dict[str, str], component : str, subsystem : Optional[str]) -> Tuple[str, str]:
+    if subsystem is None:
+        title = '{:s} - RPC Methods [{:s}]'.format(component.title(), EXPERIMENT_NAME)
+        filepath = '{:s}/{:s}.png'.format(folders['png'], component)
+    else:
+        title = '{:s} - RPC Methods - {:s} [{:s}]'.format(component.title(), subsystem.title(), EXPERIMENT_NAME)
+        filepath = '{:s}/{:s}-{:s}.png'.format(folders['png'], component, subsystem)
+    return title, filepath
+
+##### AUTOMATED CODE ###################################################################################################
+
+def get_series_names(folders : Dict[str, str]) -> List[str]:
+    series_names = get_prometheus_series_names(
+        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
+        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
+    )
+    return series_names
+
+def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
+    m = RE_SERIES_NAME.match(series_name)
+    if m is None:
+        # pylint: disable=broad-exception-raised
+        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
+    extra_labels = m.groups()
+    results = get_prometheus_range(
+        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
+        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
+    )
+    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
+    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
+    save_histograms(histograms, folders['csv'])
+    return histograms
+
+def main() -> None:
+    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
+
+    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
+    series_names = get_series_names(folders)
+
+    for series_name in series_names:
+        histograms = get_histogram_data(series_name, folders)
+        for histogram_keys, histogram_data in histograms.items():
+            collection_keys,histogram_keys = update_keys(*histogram_keys)
+            histograms = histograms_collection.setdefault(collection_keys, dict())
+            histograms[histogram_keys] = histogram_data
+
+    for histogram_keys,histograms in histograms_collection.items():
+        title, filepath = get_plot_specs(folders, *histogram_keys)
+        plot_histogram(histograms, filepath, title=title)
+
+if __name__ == '__main__':
+    main()
diff --git a/src/tests/tools/perf_plots/Device_Driver_Details.py b/src/tests/tools/perf_plots/Device_Driver_Details.py
new file mode 100644
index 0000000000000000000000000000000000000000..24b287cc826872f48acc8c24c4f51ecd7ba8c676
--- /dev/null
+++ b/src/tests/tools/perf_plots/Device_Driver_Details.py
@@ -0,0 +1,101 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, re
+from typing import Dict, List, Optional, Tuple
+from .tools.FileSystem import create_folders
+from .tools.HistogramData import HistogramData
+from .tools.Plotter import plot_histogram
+from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
+from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms
+
+##### EXPERIMENT SETTINGS ##############################################################################################
+
+EXPERIMENT_NAME = 'L2VPN with Emulated'
+EXPERIMENT_ID   = 'l2vpn-emu'
+TIME_START      = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_END        = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_STEP       = '1m'
+LABEL_FILTERS   = {
+    #'driver': 'emulated',
+    #'operation': 'configure_device', # add_device / configure_device
+    #'step': 'get_device',
+}
+
+##### ENVIRONMENT SETTINGS #############################################################################################
+
+PROM_ADDRESS = '127.0.0.1'
+PROM_PORT    = 9090
+OUT_FOLDER   = 'data/perf/'
+
+##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
+
+EXPERIMENT_ID  += '/dev-drv-details'
+SERIES_MATCH   = 'tfs_device_execution_details_histogram_duration_bucket'
+RE_SERIES_NAME = re.compile(r'^tfs_device_execution_details_histogram_duration_bucket$')
+SERIES_LABELS  = ['driver', 'operation', 'step']
+
+def update_keys(driver : str, operation : str, step : str) -> Tuple[Tuple, Tuple]:
+    collection_keys = (driver, operation)
+    histogram_keys = (step,)
+    return collection_keys, histogram_keys
+
+def get_plot_specs(folders : Dict[str, str], driver : str, operation : str) -> Tuple[str, str]:
+    title = 'Device Driver - {:s} - {:s}'.format(driver.title(), operation.replace('_', '').title())
+    filepath = '{:s}/{:s}-{:s}.png'.format(folders['png'], driver, operation)
+    return title, filepath
+
+##### AUTOMATED CODE ###################################################################################################
+
+def get_series_names(folders : Dict[str, str]) -> List[str]:
+    series_names = get_prometheus_series_names(
+        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
+        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
+    )
+    return series_names
+
+def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
+    m = RE_SERIES_NAME.match(series_name)
+    if m is None:
+        # pylint: disable=broad-exception-raised
+        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
+    extra_labels = m.groups()
+    results = get_prometheus_range(
+        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
+        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
+    )
+    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
+    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
+    save_histograms(histograms, folders['csv'])
+    return histograms
+
+def main() -> None:
+    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
+
+    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
+    series_names = get_series_names(folders)
+
+    for series_name in series_names:
+        histograms = get_histogram_data(series_name, folders)
+        for histogram_keys, histogram_data in histograms.items():
+            collection_keys,histogram_keys = update_keys(*histogram_keys)
+            histograms = histograms_collection.setdefault(collection_keys, dict())
+            histograms[histogram_keys] = histogram_data
+
+    for histogram_keys,histograms in histograms_collection.items():
+        title, filepath = get_plot_specs(folders, *histogram_keys)
+        plot_histogram(histograms, filepath, title=title)
+
+if __name__ == '__main__':
+    main()
diff --git a/src/tests/tools/perf_plots/Device_Driver_Methods.py b/src/tests/tools/perf_plots/Device_Driver_Methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..a92bd13747f6c7c6aa861c989e9f1199ef3870d0
--- /dev/null
+++ b/src/tests/tools/perf_plots/Device_Driver_Methods.py
@@ -0,0 +1,99 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, re
+from typing import Dict, List, Tuple
+from .tools.FileSystem import create_folders
+from .tools.HistogramData import HistogramData
+from .tools.Plotter import plot_histogram
+from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
+from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms
+
+##### EXPERIMENT SETTINGS ##############################################################################################
+
+EXPERIMENT_NAME = 'L2VPN with Emulated'
+EXPERIMENT_ID   = 'l2vpn-emu'
+TIME_START      = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_END        = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_STEP       = '1m'
+LABEL_FILTERS   = {
+    #'driver': 'emulated',
+}
+
+##### ENVIRONMENT SETTINGS #############################################################################################
+
+PROM_ADDRESS = '127.0.0.1'
+PROM_PORT    = 9090
+OUT_FOLDER   = 'data/perf/'
+
+##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
+
+EXPERIMENT_ID  += '/dev-drv-methods'
+SERIES_MATCH   = 'tfs_device_driver_.+_histogram_duration_bucket'
+RE_SERIES_NAME = re.compile(r'^tfs_device_driver_(.+)_histogram_duration_bucket$')
+SERIES_LABELS  = ['driver']
+
+def update_keys(driver : str, method : str) -> Tuple[Tuple, Tuple]:
+    collection_keys = (driver,)
+    histogram_keys = (method,)
+    return collection_keys, histogram_keys
+
+def get_plot_specs(folders : Dict[str, str], driver : str) -> Tuple[str, str]:
+    title = 'Device Driver - {:s}'.format(driver.title())
+    filepath = '{:s}/{:s}.png'.format(folders['png'], driver)
+    return title, filepath
+
+##### AUTOMATED CODE ###################################################################################################
+
+def get_series_names(folders : Dict[str, str]) -> List[str]:
+    series_names = get_prometheus_series_names(
+        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
+        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
+    )
+    return series_names
+
+def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
+    m = RE_SERIES_NAME.match(series_name)
+    if m is None:
+        # pylint: disable=broad-exception-raised
+        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
+    extra_labels = m.groups()
+    results = get_prometheus_range(
+        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
+        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
+    )
+    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
+    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
+    save_histograms(histograms, folders['csv'])
+    return histograms
+
+def main() -> None:
+    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
+
+    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
+    series_names = get_series_names(folders)
+
+    for series_name in series_names:
+        histograms = get_histogram_data(series_name, folders)
+        for histogram_keys, histogram_data in histograms.items():
+            collection_keys,histogram_keys = update_keys(*histogram_keys)
+            histograms = histograms_collection.setdefault(collection_keys, dict())
+            histograms[histogram_keys] = histogram_data
+
+    for histogram_keys,histograms in histograms_collection.items():
+        title, filepath = get_plot_specs(folders, *histogram_keys)
+        plot_histogram(histograms, filepath, title=title)
+
+if __name__ == '__main__':
+    main()
diff --git a/src/tests/tools/perf_plots/README.md b/src/tests/tools/perf_plots/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..14dcb1c9508a1b63c62ae84aef4abe3e17589ef7
--- /dev/null
+++ b/src/tests/tools/perf_plots/README.md
@@ -0,0 +1,29 @@
+# Tool: Perf Plots Generator
+
+Simple tool to gather performance data from Prometheus and produce histogram plots.
+
+## Example:
+
+- Ensure your MicroK8s includes the monitoring addon and your deployment specifies the service monitors.
+
+- Deploy TeraFlowSDN controller with your specific settings:
+```(bash)
+cd ~/tfs-ctrl
+source my_deploy.sh 
+./deploy.sh 
+```
+
+- Execute the test you want to measure.
+
+- Select the appropriate script:
+    - Device_Driver_Methods   : To report Device Driver Methods
+    - Device_Driver_Details   : To report Device Add/Configure Details
+    - Service_Handler_Methods : To report Service Handler Methods
+    - Component_RPC_Methods   : To report Component RPC Methods
+
+- Tune the experiment settings
+
+- Execute the report script:
+```(bash)
+PYTHONPATH=./src python -m tests.tools.perf_plots.<script>
+```
diff --git a/src/tests/tools/perf_plots/Service_Handler_Methods.py b/src/tests/tools/perf_plots/Service_Handler_Methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..a57757274d518450672ccec5a08ef6afb4c527be
--- /dev/null
+++ b/src/tests/tools/perf_plots/Service_Handler_Methods.py
@@ -0,0 +1,99 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, re
+from typing import Dict, List, Tuple
+from .tools.FileSystem import create_folders
+from .tools.HistogramData import HistogramData
+from .tools.Plotter import plot_histogram
+from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
+from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms
+
+##### EXPERIMENT SETTINGS ##############################################################################################
+
+EXPERIMENT_NAME = 'L2VPN with Emulated'
+EXPERIMENT_ID   = 'l2vpn-emu'
+TIME_START      = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_END        = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_STEP       = '1m'
+LABEL_FILTERS   = {
+    #'handler': 'l2nm_emulated',
+}
+
+##### ENVIRONMENT SETTINGS #############################################################################################
+
+PROM_ADDRESS = '127.0.0.1'
+PROM_PORT    = 9090
+OUT_FOLDER   = 'data/perf/'
+
+##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
+
+EXPERIMENT_ID  += '/svc-hdl-methods'
+SERIES_MATCH   = 'tfs_service_handler_.+_histogram_duration_bucket'
+RE_SERIES_NAME = re.compile(r'^tfs_service_handler_(.+)_histogram_duration_bucket$')
+SERIES_LABELS  = ['handler']
+
+def update_keys(handler : str, method : str) -> Tuple[Tuple, Tuple]:
+    collection_keys = (handler,)
+    histogram_keys = (method,)
+    return collection_keys, histogram_keys
+
+def get_plot_specs(folders : Dict[str, str], handler : str) -> Tuple[str, str]:
+    title = 'Service Handler - {:s}'.format(handler.title())
+    filepath = '{:s}/{:s}.png'.format(folders['png'], handler)
+    return title, filepath
+
+##### AUTOMATED CODE ###################################################################################################
+
+def get_series_names(folders : Dict[str, str]) -> List[str]:
+    series_names = get_prometheus_series_names(
+        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
+        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
+    )
+    return series_names
+
+def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
+    m = RE_SERIES_NAME.match(series_name)
+    if m is None:
+        # pylint: disable=broad-exception-raised
+        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
+    extra_labels = m.groups()
+    results = get_prometheus_range(
+        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
+        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
+    )
+    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
+    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
+    save_histograms(histograms, folders['csv'])
+    return histograms
+
+def main() -> None:
+    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
+
+    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
+    series_names = get_series_names(folders)
+
+    for series_name in series_names:
+        histograms = get_histogram_data(series_name, folders)
+        for histogram_keys, histogram_data in histograms.items():
+            collection_keys,histogram_keys = update_keys(*histogram_keys)
+            histograms = histograms_collection.setdefault(collection_keys, dict())
+            histograms[histogram_keys] = histogram_data
+
+    for histogram_keys,histograms in histograms_collection.items():
+        title, filepath = get_plot_specs(folders, *histogram_keys)
+        plot_histogram(histograms, filepath, title=title)
+
+if __name__ == '__main__':
+    main()
diff --git a/src/tests/tools/perf_plots/__init__.py b/src/tests/tools/perf_plots/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/tests/tools/perf_plots/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/tools/perf_plots/tools/FileSystem.py b/src/tests/tools/perf_plots/tools/FileSystem.py
new file mode 100644
index 0000000000000000000000000000000000000000..3af5dbc910be35b548a11d7a00ee79e604aa0927
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/FileSystem.py
@@ -0,0 +1,27 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pathlib
+from typing import Dict
+
def create_folders(root_folder : str, experiment_id : str) -> Dict[str, str]:
    """Create (if needed) the csv/json/png output folders for an experiment.

    Returns a dict mapping folder kind ('csv'/'json'/'png') to its path string.
    """
    base = '{:s}/{:s}'.format(root_folder, experiment_id)
    folders = {name: '{:s}/{:s}'.format(base, name) for name in ('csv', 'json', 'png')}
    for path in folders.values():
        pathlib.Path(path).mkdir(parents=True, exist_ok=True)
    return folders
diff --git a/src/tests/tools/perf_plots/tools/Histogram.py b/src/tests/tools/perf_plots/tools/Histogram.py
new file mode 100644
index 0000000000000000000000000000000000000000..0380b5bd21804cfc468a1f9bd19565337f76f741
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/Histogram.py
@@ -0,0 +1,88 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import csv
+from typing import Dict, List, Tuple
+from .HistogramData import HistogramData
+
def results_to_histograms(
    results : List[Dict], key_labels : List[str], extra_labels : Tuple[str, ...] = ()
) -> Dict[Tuple, HistogramData]:
    """Group Prometheus query_range results into HistogramData objects.

    Each result contributes one 'le' bucket; results sharing the values of
    key_labels (plus the constant extra_labels, if given) are merged into a
    single histogram keyed by that label-value tuple.

    Note: the default for extra_labels is now an (immutable) empty tuple instead
    of a mutable list literal, avoiding the shared-mutable-default pitfall.
    """
    histograms : Dict[Tuple, HistogramData] = dict()
    for result in results:
        metric : Dict = result['metric']
        # identify the histogram this result belongs to
        labels = [metric[label] for label in key_labels]
        labels.extend(extra_labels)     # extending with an empty sequence is a no-op
        histogram_key = tuple(labels)
        if histogram_key not in histograms:
            histograms[histogram_key] = HistogramData(timestamps=set(), bins=set(), data=dict())
        histogram = histograms[histogram_key]

        bin_ = float(metric['le'])      # bucket upper bound
        histogram.bins.add(bin_)

        values : List[Tuple[int, str]] = result['values']
        for timestamp, count in values:
            histogram.timestamps.add(timestamp)
            histogram.data.setdefault(timestamp, dict())[bin_] = int(count)
    return histograms
+
def unaccumulate_histogram(
    histogram : HistogramData, process_bins : bool = True, process_timestamps : bool = True
) -> None:
    """Undo Prometheus-style accumulation on a histogram, in place.

    With process_bins, cumulative-per-bucket counts become per-bucket counts;
    with process_timestamps, counts accumulated since scrape start become
    per-interval counts. Both can be applied in one pass.
    """
    sorted_bins = sorted(histogram.bins)
    # per-bucket totals already attributed to earlier timestamps
    prev_total_per_bin = {b: 0 for b in sorted_bins}
    for ts in sorted(histogram.timestamps):
        counts = histogram.data.get(ts)
        if counts is None: continue

        running_total = 0   # total attributed to smaller buckets at this timestamp
        for b in sorted_bins:
            value = counts[b]

            if process_bins:
                value -= running_total
                running_total += value

            if process_timestamps:
                value -= prev_total_per_bin[b]
                prev_total_per_bin[b] += value

            counts[b] = value
+
def unaccumulate_histograms(
    histograms : Dict[Tuple, HistogramData], process_bins : bool = True, process_timestamps : bool = True
) -> None:
    """Apply unaccumulate_histogram to every histogram in the collection, in place."""
    for histogram_data in histograms.values():
        unaccumulate_histogram(
            histogram_data, process_bins=process_bins, process_timestamps=process_timestamps)
+
def save_histogram(filepath : str, histogram : HistogramData) -> None:
    """Write a histogram to a CSV file: one row per timestamp, one column per bucket
    (header row holds the bucket upper bounds; missing buckets are written as 0)."""
    timestamps = sorted(histogram.timestamps)
    bins = sorted(histogram.bins)
    header = [''] + [str(b) for b in bins]
    # newline='' is required by the csv module; without it, platforms that translate
    # newlines (e.g. Windows) produce spurious '\r\r\n' line endings.
    with open(filepath, 'w', encoding='UTF-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for timestamp in timestamps:
            bin_to_count = histogram.data.get(timestamp, {})
            writer.writerow([timestamp] + [
                str(bin_to_count.get(bin_, 0))
                for bin_ in bins
            ])
+
def save_histograms(histograms : Dict[Tuple, HistogramData], data_folder : str) -> None:
    """Write each histogram in the collection to its own CSV file under data_folder,
    naming the file after the histogram's key labels joined by '__'."""
    for histogram_keys, histogram_data in histograms.items():
        csv_filename = '__'.join(histogram_keys)
        save_histogram('{:s}/{:s}.csv'.format(data_folder, csv_filename), histogram_data)
diff --git a/src/tests/tools/perf_plots/tools/HistogramData.py b/src/tests/tools/perf_plots/tools/HistogramData.py
new file mode 100644
index 0000000000000000000000000000000000000000..7469853c636b089b9dff8473b34fb7ee6913d1aa
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/HistogramData.py
@@ -0,0 +1,22 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+from typing import Dict, Set
+
@dataclass
class HistogramData:
    # In-memory form of one Prometheus histogram series, as built by
    # results_to_histograms: samples indexed by timestamp and bucket.
    timestamps : Set[int]                       # sample timestamps present in the series
    bins       : Set[float]                     # bucket upper bounds (values of the 'le' label)
    data       : Dict[int, Dict[float, int]]    # timestamp -> bucket upper bound -> count
diff --git a/src/tests/tools/perf_plots/tools/Plotter.py b/src/tests/tools/perf_plots/tools/Plotter.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0ce92a7f91198bcac9692a55f3c122792b16b84
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/Plotter.py
@@ -0,0 +1,59 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import matplotlib.pyplot as plt
+from typing import Dict, Optional, Tuple
+from .HistogramData import HistogramData
+
def plot_histogram(
    histograms : Dict[Tuple, HistogramData], filepath : str,
    title : Optional[str] = None, label_separator : str = ' ', dpi : int = 600,
    legend_loc : str = 'best', grid : bool = True
) -> None:
    """Plot the cumulative density of each histogram (at its last timestamp) as one
    step curve, save the figure to filepath, and display it.

    Histograms whose counts are all zero are skipped; if nothing remains, no file
    is written. The figure is always closed to avoid leaking memory across calls.
    """
    fig, ax = plt.subplots(figsize=(8, 8))
    try:
        num_series = 0
        for histogram_keys, histogram_data in histograms.items():
            bins = sorted(histogram_data.bins)

            # use the last timestamp: with time-accumulated data these are the totals
            last_timestamp = max(histogram_data.timestamps)
            counts = histogram_data.data.get(last_timestamp)
            counts = [int(counts[bin_]) for bin_ in bins]
            if sum(counts) == 0: continue   # nothing recorded for this series
            num_series += 1

            # prepend a 0 lower edge so each bucket has two edges
            bins.insert(0, 0)
            bins = np.array(bins).astype(float)
            counts = np.array(counts).astype(float)

            assert len(bins) == len(counts) + 1
            centroids = (bins[1:] + bins[:-1]) / 2

            label = label_separator.join(histogram_keys)
            ax.hist(centroids, bins=bins, weights=counts, range=(min(bins), max(bins)), density=True,
                    histtype='step', cumulative=True, label=label)

        if num_series == 0: return  # figure is still closed by the 'finally' below

        ax.grid(grid)
        ax.legend(loc=legend_loc)
        if title is not None: ax.set_title(title)
        ax.set_xlabel('seconds')
        ax.set_ylabel('Likelihood of occurrence')
        ax.set_xscale('log')
        fig.savefig(filepath, dpi=dpi)
        plt.show()
    finally:
        # previously the figure was never closed (and leaked on the empty-plot
        # early return); release it explicitly
        plt.close(fig)
diff --git a/src/tests/tools/perf_plots/tools/Prometheus.py b/src/tests/tools/perf_plots/tools/Prometheus.py
new file mode 100644
index 0000000000000000000000000000000000000000..60a06b202e5b9710bb814974360fd6d8e4580cc4
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/Prometheus.py
@@ -0,0 +1,59 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, requests, time
+from datetime import datetime
+from typing import Dict, List, Optional
+
def get_prometheus_series_names(
    address : str, port : int, metric_match : str, time_start : datetime, time_end : datetime, timeout : int = 10,
    raw_json_filepath : Optional[str] = None
) -> List[str]:
    """Ask Prometheus for the metric names matching the metric_match regular
    expression within [time_start, time_end]; optionally keep the raw JSON reply.

    Asserts that the API reports success.
    """
    url = 'http://{:s}:{:d}/api/v1/label/__name__/values'.format(address, port)
    params = {
        'match[]': '{{__name__=~"{:s}"}}'.format(metric_match),
        'start': time.mktime(time_start.timetuple()),
        'end'  : time.mktime(time_end.timetuple()),
    }
    reply = requests.get(url, params=params, timeout=timeout).json()
    if raw_json_filepath is not None:
        with open(raw_json_filepath, 'w', encoding='UTF-8') as f:
            f.write(json.dumps(reply, sort_keys=True))
    assert reply['status'] == 'success'
    return reply['data']
+
def get_prometheus_range(
    address : str, port : int, metric_name : str, labels : Dict[str, str], time_start : datetime, time_end : datetime,
    time_step : str, timeout : int = 10, raw_json_filepath : Optional[str] = None
) -> List[Dict]:
    """Run a Prometheus range query for metric_name (optionally filtered by exact-match
    labels) between time_start and time_end at time_step resolution.

    Asserts the API reports success with a matrix result, and returns the result list;
    optionally keeps the raw JSON reply.
    """
    query = metric_name
    if labels:
        label_selector = ', '.join(['{:s}="{:s}"'.format(name, value) for name,value in labels.items()])
        query += '{{{:s}}}'.format(label_selector)
    reply = requests.get(
        'http://{:s}:{:d}/api/v1/query_range'.format(address, port),
        params={
            'query': query,
            'start': time.mktime(time_start.timetuple()),
            'end'  : time.mktime(time_end.timetuple()),
            'step' : time_step,
        },
        timeout=timeout
    ).json()
    if raw_json_filepath is not None:
        with open(raw_json_filepath, 'w', encoding='UTF-8') as f:
            f.write(json.dumps(reply, sort_keys=True))
    assert reply['status'] == 'success'
    assert reply['data']['resultType'] == 'matrix'
    return reply['data']['result']
diff --git a/src/tests/tools/perf_plots/tools/__init__.py b/src/tests/tools/perf_plots/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+