diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 54db3253f4975516056d98488ffdab375541c532..c3639f93df36c800e60d2025c06a3670d0531cca 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -486,11 +486,11 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
     curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
     echo
 
-    # Dashboard: Device ConfigureDevice Details
-    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_device_config_exec_details.json' \
+    # Dashboard: Device Execution Details
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_device_exec_details.json' \
         ${GRAFANA_URL_UPDATED}/api/dashboards/db
     echo
-    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-dev-confdev"
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-dev-exec"
     DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
     curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
     echo
diff --git a/proto/load_generator.proto b/proto/load_generator.proto
index 7d0070c66f1104d9903950fb8b59f64e3ec42f71..32523b331418813b51fb542d9eb17e29fc2b13d2 100644
--- a/proto/load_generator.proto
+++ b/proto/load_generator.proto
@@ -48,17 +48,19 @@ message ScalarOrRange {
 message Parameters {
   uint64 num_requests = 1;  // if == 0, generate infinite requests
   repeated RequestTypeEnum request_types = 2;
-  float offered_load = 3;
-  float holding_time = 4;
-  float inter_arrival_time = 5;
-  repeated ScalarOrRange availability = 6;    // one from the list is selected
-  repeated ScalarOrRange capacity_gbps = 7;   // one from the list is selected
-  repeated ScalarOrRange e2e_latency_ms = 8;  // one from the list is selected
-  uint32 max_workers = 9;
-  bool do_teardown = 10;
-  bool dry_mode = 11;
-  bool record_to_dlt = 12;
-  string dlt_domain_id = 13;
+  string device_regex = 3;      // Only devices and endpoints matching the regular expression will be considered as
+  string endpoint_regex = 4;    // source-destination candidates for the requests generated.
+  float offered_load = 5;
+  float holding_time = 6;
+  float inter_arrival_time = 7;
+  repeated ScalarOrRange availability = 8;    // One from the list is selected to populate the constraint
+  repeated ScalarOrRange capacity_gbps = 9;   // One from the list is selected to populate the constraint
+  repeated ScalarOrRange e2e_latency_ms = 10; // One from the list is selected to populate the constraint
+  uint32 max_workers = 11;
+  bool do_teardown = 12;
+  bool dry_mode = 13;
+  bool record_to_dlt = 14;
+  string dlt_domain_id = 15;
 }
 
 message Status {
diff --git a/src/compute/tests/test_slice.py b/src/compute/tests/test_slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..61f286eb74a876fa02546fc2bf1dcd8f092e718a
--- /dev/null
+++ b/src/compute/tests/test_slice.py
@@ -0,0 +1,125 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, random, uuid
+from typing import Dict, Tuple
+from compute.service.rest_server.nbi_plugins.ietf_network_slice.bindings.network_slice_services import (
+    NetworkSliceServices
+)
+
+# R1 emulated devices
+# Port 13-0 is Optical
+# Port 13-1 is Copper
+R1_UUID = "ed2388eb-5fb9-5888-a4f4-160267d3e19b"
+R1_PORT_13_0_UUID_OPTICAL = "20440915-1a6c-5e7b-a80f-b0e0e51f066d"
+R1_PORT_13_1_UUID_COPPER = "ff900d5d-2ac0-576c-9628-a2d016681f9d"
+
+# R2 emulated devices
+# Port 13-0 is Optical
+# Port 13-1 is Copper
+R2_UUID = "49ce0312-1274-523b-97b8-24d0eca2d72d"
+R2_PORT_13_0_UUID_OPTICAL = "214618cb-b63b-5e66-84c2-45c1c016e5f0"
+R2_PORT_13_1_UUID_COPPER = "4e0f7fb4-5d22-56ad-a00e-20bffb4860f9"
+
+# R3 emulated devices
+# Port 13-0 is Optical
+# Port 13-1 is Copper
+R3_UUID = "3bc8e994-a3b9-5f60-9c77-6608b1d08313"
+R3_PORT_13_0_UUID_OPTICAL = "da5196f5-d651-5def-ada6-50ed6430279d"
+R3_PORT_13_1_UUID_COPPER = "43d221fa-5701-5740-a129-502131f5bda2"
+
+# R4 emulated devices
+# Port 13-0 is Optical
+# Port 13-1 is Copper
+R4_UUID = "b43e6361-2573-509d-9a88-1793e751b10d"
+R4_PORT_13_0_UUID_OPTICAL = "241b74a7-8677-595c-ad65-cc9093c1e341"
+R4_PORT_13_1_UUID_COPPER = "c57abf46-caaf-5954-90cc-1fec0a69330e"
+
+node_dict = {R1_PORT_13_1_UUID_COPPER: R1_UUID,
+             R2_PORT_13_1_UUID_COPPER: R2_UUID,
+             R3_PORT_13_1_UUID_COPPER: R3_UUID,
+             R4_PORT_13_1_UUID_COPPER: R4_UUID}
+list_endpoints = [R1_PORT_13_1_UUID_COPPER,
+                  R2_PORT_13_1_UUID_COPPER,
+                  R3_PORT_13_1_UUID_COPPER,
+                  R4_PORT_13_1_UUID_COPPER]
+
+list_availability= [99, 99.9, 99.99, 99.999, 99.9999]
+list_bw = [10, 40, 50, 100, 150, 200, 400]
+list_owner = ["Telefonica", "CTTC", "Telenor", "ADVA", "Ubitech", "ATOS"]
+
+URL_POST = "/restconf/data/ietf-network-slice-service:ietf-nss/network-slice-services"
+URL_DELETE = "/restconf/data/ietf-network-slice-service:ietf-nss/network-slice-services/slice-service="
+
+def generate_request(seed: str) -> Tuple[Dict, str]:
+
+    ns = NetworkSliceServices()
+
+    # Slice 1
+    suuid = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(seed)))
+    slice1 = ns.slice_service[suuid]
+    slice1.service_description = "Test slice for OFC 2023 demo"
+    slice1.status().admin_status().status = "Planned"  # TODO not yet mapped
+
+    # SDPs: pick two distinct copper endpoints at random as source/destination
+    sdps1 = slice1.sdps().sdp
+    while True:
+        ep1_uuid = random.choice(list_endpoints)
+        ep2_uuid = random.choice(list_endpoints)
+        if ep1_uuid != ep2_uuid:
+            break
+
+    sdps1[ep1_uuid].node_id = node_dict.get(ep1_uuid)
+    sdps1[ep2_uuid].node_id = node_dict.get(ep2_uuid)
+
+    # Connectivity group: connectivity construct and 2 SLA constraints:
+    #   - Bandwidth
+    #   - Availability
+    cg_uuid = str(uuid.uuid4())
+    cg = slice1.connection_groups().connection_group
+    cg1 = cg[cg_uuid]
+
+    cc1 = cg1.connectivity_construct[0]
+    cc1.cc_id = 5
+    p2p = cc1.connectivity_construct_type.p2p()
+    p2p.p2p_sender_sdp = ep1_uuid
+    p2p.p2p_receiver_sdp = ep2_uuid
+
+    slo_custom = cc1.slo_sle_policy.custom()
+    metric_bounds = slo_custom.service_slo_sle_policy().metric_bounds().metric_bound
+
+    # SLO Bandwidth
+    slo_bandwidth = metric_bounds["service-slo-two-way-bandwidth"]
+    slo_bandwidth.value_description = "Guaranteed bandwidth"
+    slo_bandwidth.bound = int(random.choice(list_bw))
+    slo_bandwidth.metric_unit = "Gbps"
+
+    # SLO Availability
+    slo_availability = metric_bounds["service-slo-availability"]
+    slo_availability.value_description = "Guaranteed availability"
+    slo_availability.metric_unit = "percentage"
+    slo_availability.bound = random.choice(list_availability)
+
+    json_request = {"data": ns.to_json()}
+
+    # Lastly, add name and owner manually
+    list_name_owner = [{"tag-type": "owner", "value": random.choice(list_owner)}]
+    json_request["data"]["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["service-tags"] = list_name_owner
+
+    return (json_request, suuid)
+
+
+if __name__ == "__main__":
+    request = generate_request(123)
+    print(json.dumps(request[0], sort_keys=True, indent=4))
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
index e7fec041802cc661b14617a8ebfec0864c738b39..38a6b735b32ee667c3be2f5381df84c40d773c06 100644
--- a/src/device/service/DeviceServiceServicerImpl.py
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -37,8 +37,8 @@ LOGGER = logging.getLogger(__name__)
 
 METRICS_POOL = MetricsPool('Device', 'RPC')
 
-METRICS_POOL_DETAILS = MetricsPool('Device', 'exec_details', labels={
-    'step_name': '',
+METRICS_POOL_DETAILS = MetricsPool('Device', 'execution', labels={
+    'driver': '', 'operation': '', 'step': '',
 })
 
 class DeviceServiceServicerImpl(DeviceServiceServicer):
@@ -51,11 +51,15 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def AddDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId:
+        t0 = time.time()
+
         device_uuid = request.device_id.device_uuid.uuid
 
         connection_config_rules = check_connect_rules(request.device_config)
         check_no_endpoints(request.device_endpoints)
 
+        t1 = time.time()
+
         context_client = ContextClient()
         device = get_device(context_client, device_uuid, rw_copy=True)
         if device is None:
@@ -73,10 +77,15 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
         # update device_uuid to honor UUID provided by Context
         device_uuid = device.device_id.device_uuid.uuid
 
+        t2 = time.time()
+
         self.mutex_queues.wait_my_turn(device_uuid)
+        t3 = time.time()
         try:
             driver : _Driver = get_driver(self.driver_instance_cache, device)
 
+            t4 = time.time()
+
             errors = []
 
             # Sub-devices and sub-links are exposed by intermediate controllers or represent mgmt links.
@@ -86,13 +95,23 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
             new_sub_links : Dict[str, Link] = dict()
 
             if len(device.device_endpoints) == 0:
+                t5 = time.time()
                 # created from request, populate endpoints using driver
                 errors.extend(populate_endpoints(
                     device, driver, self.monitoring_loops, new_sub_devices, new_sub_links))
+                t6 = time.time()
+                t_pop_endpoints = t6 - t5
+            else:
+                t_pop_endpoints = None
 
             if len(device.device_config.config_rules) == len(connection_config_rules):
                 # created from request, populate config rules using driver
+                t7 = time.time()
                 errors.extend(populate_config_rules(device, driver))
+                t8 = time.time()
+                t_pop_config_rules = t8 - t7
+            else:
+                t_pop_config_rules = None
 
             # TODO: populate components
 
@@ -100,22 +119,60 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
                 for error in errors: LOGGER.error(error)
                 raise OperationFailedException('AddDevice', extra_details=errors)
 
+            t9 = time.time()
+
             device.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
             device_id = context_client.SetDevice(device)
 
+            t10 = time.time()
+
             for sub_device in new_sub_devices.values():
                 context_client.SetDevice(sub_device)
 
+            t11 = time.time()
+
             for sub_links in new_sub_links.values():
                 context_client.SetLink(sub_links)
 
+            t12 = time.time()
+
             # Update endpoint monitoring resources with UUIDs
             device_with_uuids = get_device(
                 context_client, device_id.device_uuid.uuid, rw_copy=False, include_endpoints=True,
                 include_components=False, include_config_rules=False)
             populate_endpoint_monitoring_resources(device_with_uuids, self.monitoring_loops)
 
+            t13 = time.time()
+
             context_client.close()
+
+            t14 = time.time()
+
+            metrics_labels = dict(driver=driver.name, operation='add_device')
+
+            histogram_duration : Histogram = METRICS_POOL_DETAILS.get_or_create(
+                'details', MetricTypeEnum.HISTOGRAM_DURATION)
+            histogram_duration.labels(step='total'              , **metrics_labels).observe(t14-t0)
+            histogram_duration.labels(step='execution'          , **metrics_labels).observe(t14-t3)
+            histogram_duration.labels(step='endpoint_checks'    , **metrics_labels).observe(t1-t0)
+            histogram_duration.labels(step='get_device'         , **metrics_labels).observe(t2-t1)
+            histogram_duration.labels(step='wait_queue'         , **metrics_labels).observe(t3-t2)
+            histogram_duration.labels(step='get_driver'         , **metrics_labels).observe(t4-t3)
+            histogram_duration.labels(step='set_device'         , **metrics_labels).observe(t10-t9)
+            histogram_duration.labels(step='populate_monit_rsrc', **metrics_labels).observe(t13-t12)
+
+            if t_pop_endpoints is not None:
+                histogram_duration.labels(step='populate_endpoints', **metrics_labels).observe(t_pop_endpoints)
+
+            if t_pop_config_rules is not None:
+                histogram_duration.labels(step='populate_config_rules', **metrics_labels).observe(t_pop_config_rules)
+
+            if len(new_sub_devices) > 0:
+                histogram_duration.labels(step='set_sub_devices', **metrics_labels).observe(t11-t10)
+
+            if len(new_sub_links) > 0:
+                histogram_duration.labels(step='set_sub_links', **metrics_labels).observe(t12-t11)
+
             return device_id
         finally:
             self.mutex_queues.signal_done(device_uuid)
@@ -195,16 +252,18 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
 
             t9 = time.time()
 
+            metrics_labels = dict(driver=driver.name, operation='configure_device')
+
             histogram_duration : Histogram = METRICS_POOL_DETAILS.get_or_create(
-                'ConfigureDevice', MetricTypeEnum.HISTOGRAM_DURATION)
-            histogram_duration.labels(step_name='total'            ).observe(t9-t0)
-            histogram_duration.labels(step_name='wait_queue'       ).observe(t1-t0)
-            histogram_duration.labels(step_name='execution'        ).observe(t9-t1)
-            histogram_duration.labels(step_name='get_device'       ).observe(t3-t2)
-            histogram_duration.labels(step_name='split_rules'      ).observe(t5-t4)
-            histogram_duration.labels(step_name='configure_rules'  ).observe(t6-t5)
-            histogram_duration.labels(step_name='deconfigure_rules').observe(t7-t6)
-            histogram_duration.labels(step_name='set_device'       ).observe(t9-t8)
+                'details', MetricTypeEnum.HISTOGRAM_DURATION)
+            histogram_duration.labels(step='total'            , **metrics_labels).observe(t9-t0)
+            histogram_duration.labels(step='wait_queue'       , **metrics_labels).observe(t1-t0)
+            histogram_duration.labels(step='execution'        , **metrics_labels).observe(t9-t1)
+            histogram_duration.labels(step='get_device'       , **metrics_labels).observe(t3-t2)
+            histogram_duration.labels(step='split_rules'      , **metrics_labels).observe(t5-t4)
+            histogram_duration.labels(step='configure_rules'  , **metrics_labels).observe(t6-t5)
+            histogram_duration.labels(step='deconfigure_rules', **metrics_labels).observe(t7-t6)
+            histogram_duration.labels(step='set_device'       , **metrics_labels).observe(t9-t8)
 
             return device_id
         finally:
diff --git a/src/device/service/driver_api/_Driver.py b/src/device/service/driver_api/_Driver.py
index 947bc8570a941f8f666c87647d89c315b1bd202a..7adaec79dc99f9b7c836acaec886b0d5bda97fb8 100644
--- a/src/device/service/driver_api/_Driver.py
+++ b/src/device/service/driver_api/_Driver.py
@@ -27,7 +27,7 @@ RESOURCE_ACL = '__acl__'
 
 
 class _Driver:
-    def __init__(self, address: str, port: int, **settings) -> None:
+    def __init__(self, name : str, address: str, port: int, **settings) -> None:
         """ Initialize Driver.
             Parameters:
                 address : str
@@ -37,7 +37,22 @@ class _Driver:
                 **settings
                     Extra settings required by the driver.
         """
-        raise NotImplementedError()
+        self._name = name
+        self._address = address
+        self._port = port
+        self._settings = settings
+
+    @property
+    def name(self): return self._name
+
+    @property
+    def address(self): return self._address
+
+    @property
+    def port(self): return self._port
+
+    @property
+    def settings(self): return self._settings
 
     def Connect(self) -> bool:
         """ Connect to the Device.
diff --git a/src/device/service/drivers/emulated/EmulatedDriver.py b/src/device/service/drivers/emulated/EmulatedDriver.py
index 2acb288784d6da5b202f14c2534ee1a59486a20e..8f9453574a7333e599ea56158204627fcfdd3680 100644
--- a/src/device/service/drivers/emulated/EmulatedDriver.py
+++ b/src/device/service/drivers/emulated/EmulatedDriver.py
@@ -31,16 +31,18 @@ LOGGER = logging.getLogger(__name__)
 
 RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r'^\/interface\[([^\]]+)\].*')
 
-METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'emulated'})
+DRIVER_NAME = 'emulated'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
 
 class EmulatedDriver(_Driver):
-    def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called
+    def __init__(self, address : str, port : int, **settings) -> None:
+        super().__init__(DRIVER_NAME, address, port, **settings)
         self.__lock = threading.Lock()
         self.__initial = TreeNode('.')
         self.__running = TreeNode('.')
         self.__subscriptions = TreeNode('.')
 
-        endpoints = settings.get('endpoints', [])
+        endpoints = self.settings.get('endpoints', [])
         endpoint_resources = []
         for endpoint in endpoints:
             endpoint_resource = compose_resource_endpoint(endpoint)
diff --git a/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py b/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py
index 96dfd2c15f6b359e254a6d6a24dfe42a546833ce..9498dc84cc6991fd2295371842fa8508c961f1bc 100644
--- a/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py
+++ b/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py
@@ -39,21 +39,23 @@ ALL_RESOURCE_KEYS = [
 
 SERVICE_TYPE = 'ELINE'
 
-METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'ietf_l2vpn'})
+DRIVER_NAME = 'ietf_l2vpn'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
 
 class IetfL2VpnDriver(_Driver):
-    def __init__(self, address: str, port: int, **settings) -> None:    # pylint: disable=super-init-not-called
+    def __init__(self, address: str, port: int, **settings) -> None:
+        super().__init__(DRIVER_NAME, address, port, **settings)
         self.__lock = threading.Lock()
         self.__started = threading.Event()
         self.__terminate = threading.Event()
-        username = settings.get('username')
-        password = settings.get('password')
-        scheme = settings.get('scheme', 'http')
-        wim = {'wim_url': '{:s}://{:s}:{:d}'.format(scheme, address, int(port))}
+        username = self.settings.get('username')
+        password = self.settings.get('password')
+        scheme = self.settings.get('scheme', 'http')
+        wim = {'wim_url': '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port))}
         wim_account = {'user': username, 'password': password}
         # Mapping updated dynamically with each request
         config = {'mapping_not_needed': False, 'service_endpoint_mapping': []}
-        self.dac = TfsDebugApiClient(address, int(port), scheme=scheme, username=username, password=password)
+        self.dac = TfsDebugApiClient(self.address, int(self.port), scheme=scheme, username=username, password=password)
         self.wim = WimconnectorIETFL2VPN(wim, wim_account, config=config)
         self.conn_info = {} # internal database emulating OSM storage provided to WIM Connectors
 
diff --git a/src/device/service/drivers/microwave/IETFApiDriver.py b/src/device/service/drivers/microwave/IETFApiDriver.py
index fad7cd0736ec35c5675461af241b2e7de2295dac..a8ef9094652378df8d1f1a55868849316b7ec95b 100644
--- a/src/device/service/drivers/microwave/IETFApiDriver.py
+++ b/src/device/service/drivers/microwave/IETFApiDriver.py
@@ -23,20 +23,22 @@ from .Tools import create_connectivity_service, find_key, config_getter, delete_
 
 LOGGER = logging.getLogger(__name__)
 
-METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'microwave'})
+DRIVER_NAME = 'microwave'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
 
 class IETFApiDriver(_Driver):
-    def __init__(self, address: str, port: int, **settings) -> None:    # pylint: disable=super-init-not-called
+    def __init__(self, address: str, port: int, **settings) -> None:
+        super().__init__(DRIVER_NAME, address, port, **settings)
         self.__lock = threading.Lock()
         self.__started = threading.Event()
         self.__terminate = threading.Event()
-        username = settings.get('username')
-        password = settings.get('password')
+        username = self.settings.get('username')
+        password = self.settings.get('password')
         self.__auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None
-        scheme = settings.get('scheme', 'http')
-        self.__ietf_root = '{:s}://{:s}:{:d}'.format(scheme, address, int(port))
-        self.__timeout = int(settings.get('timeout', 120))
-        self.__node_ids = set(settings.get('node_ids', []))
+        scheme = self.settings.get('scheme', 'http')
+        self.__ietf_root = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port))
+        self.__timeout = int(self.settings.get('timeout', 120))
+        self.__node_ids = set(self.settings.get('node_ids', []))
 
     def Connect(self) -> bool:
         url = self.__ietf_root + '/nmswebs/restconf/data/ietf-network:networks'
diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py
index 2399b9ac01258a21a4da6a9aa0e5bc09ea851951..ac67c4ab0d314adb3ce2af0aaffeda18e67334fc 100644
--- a/src/device/service/drivers/openconfig/OpenConfigDriver.py
+++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py
@@ -235,11 +235,13 @@ def edit_config(
             results = [e for _ in resources] # if commit fails, set exception in each resource
     return results
 
-METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'openconfig'})
+DRIVER_NAME = 'openconfig'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
 
 class OpenConfigDriver(_Driver):
-    def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called
-        self.__logger = logging.getLogger('{:s}:[{:s}:{:s}]'.format(str(__name__), str(address), str(port)))
+    def __init__(self, address : str, port : int, **settings) -> None:
+        super().__init__(DRIVER_NAME, address, port, **settings)
+        self.__logger = logging.getLogger('{:s}:[{:s}:{:s}]'.format(str(__name__), str(self.address), str(self.port)))
         self.__lock = threading.Lock()
         #self.__initial = TreeNode('.')
         #self.__running = TreeNode('.')
@@ -249,11 +251,11 @@ class OpenConfigDriver(_Driver):
         self.__scheduler = BackgroundScheduler(daemon=True) # scheduler used to emulate sampling events
         self.__scheduler.configure(
             jobstores = {'default': MemoryJobStore()},
-            executors = {'default': ThreadPoolExecutor(max_workers=1)},
+            executors = {'default': ThreadPoolExecutor(max_workers=1)}, # important! 1 = avoid concurrent requests
             job_defaults = {'coalesce': False, 'max_instances': 3},
             timezone=pytz.utc)
         self.__out_samples = queue.Queue()
-        self.__netconf_handler : NetconfSessionHandler = NetconfSessionHandler(address, port, **settings)
+        self.__netconf_handler = NetconfSessionHandler(self.address, self.port, **(self.settings))
         self.__samples_cache = SamplesCache(self.__netconf_handler, self.__logger)
 
     def Connect(self) -> bool:
diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py
index de47f49c05b0f344999382883233a12eceb43c1b..9577b9dad436929d9d9ee1804bcac47cf5c26f91 100644
--- a/src/device/service/drivers/p4/p4_driver.py
+++ b/src/device/service/drivers/p4/p4_driver.py
@@ -41,7 +41,8 @@ except ImportError:
 
 LOGGER = logging.getLogger(__name__)
 
-METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'p4'})
+DRIVER_NAME = 'p4'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
 
 class P4Driver(_Driver):
     """
@@ -80,7 +81,7 @@ class P4Driver(_Driver):
         self.__endpoint = None
         self.__settings = settings
         self.__id = None
-        self.__name = None
+        self.__name = DRIVER_NAME
         self.__vendor = P4_VAL_DEF_VENDOR
         self.__hw_version = P4_VAL_DEF_HW_VER
         self.__sw_version = P4_VAL_DEF_SW_VER
diff --git a/src/device/service/drivers/transport_api/TransportApiDriver.py b/src/device/service/drivers/transport_api/TransportApiDriver.py
index 1991a34d0d797c48b6c2296435c0ebd0f3a8125a..98ed8e6aae613ea45519143c89e72af32f3b2620 100644
--- a/src/device/service/drivers/transport_api/TransportApiDriver.py
+++ b/src/device/service/drivers/transport_api/TransportApiDriver.py
@@ -23,19 +23,21 @@ from .Tools import create_connectivity_service, find_key, config_getter, delete_
 
 LOGGER = logging.getLogger(__name__)
 
-METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'transport_api'})
+DRIVER_NAME = 'transport_api'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
 
 class TransportApiDriver(_Driver):
-    def __init__(self, address: str, port: int, **settings) -> None:    # pylint: disable=super-init-not-called
+    def __init__(self, address: str, port: int, **settings) -> None:
+        super().__init__(DRIVER_NAME, address, port, **settings)
         self.__lock = threading.Lock()
         self.__started = threading.Event()
         self.__terminate = threading.Event()
-        username = settings.get('username')
-        password = settings.get('password')
+        username = self.settings.get('username')
+        password = self.settings.get('password')
         self.__auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None
-        scheme = settings.get('scheme', 'http')
-        self.__tapi_root = '{:s}://{:s}:{:d}'.format(scheme, address, int(port))
-        self.__timeout = int(settings.get('timeout', 120))
+        scheme = self.settings.get('scheme', 'http')
+        self.__tapi_root = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port))
+        self.__timeout = int(self.settings.get('timeout', 120))
 
     def Connect(self) -> bool:
         url = self.__tapi_root + '/restconf/data/tapi-common:context'
diff --git a/src/device/service/drivers/xr/XrDriver.py b/src/device/service/drivers/xr/XrDriver.py
index c1471a8136b0e5cd7791e019bb0bdafd2252f591..46269ff8904a0e20dbcb08202220412e64cb6283 100644
--- a/src/device/service/drivers/xr/XrDriver.py
+++ b/src/device/service/drivers/xr/XrDriver.py
@@ -33,21 +33,23 @@ urllib3.disable_warnings()
 
 LOGGER = logging.getLogger(__name__)
 
-METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'xr'})
+DRIVER_NAME = 'xr'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
 
 class XrDriver(_Driver):
-    def __init__(self, address: str, port: int, **settings) -> None:    # pylint: disable=super-init-not-called
+    def __init__(self, address: str, port: int, **settings) -> None:
+        super().__init__(DRIVER_NAME, address, port, **settings)
         self.__lock = threading.Lock()
         self.__started = threading.Event()
         self.__terminate = threading.Event()
-        self.__timeout = int(settings.get('timeout', 120))
-        self.__cm_address = address
+        self.__timeout = int(self.settings.get('timeout', 120))
+        self.__cm_address = self.address
         # Mandatory key, an exception will get thrown if missing
-        self.__hub_module_name = settings["hub_module_name"]
+        self.__hub_module_name = self.settings["hub_module_name"]
 
         tls_verify = False # Currently using self signed certificates
-        username = settings.get("username", "xr-user-1")
-        password = settings.get("password", "xr-user-1")
+        username = self.settings.get("username", "xr-user-1")
+        password = self.settings.get("password", "xr-user-1")
 
         # Options are:
         #    disabled --> just import endpoints as usual
@@ -55,7 +57,7 @@ class XrDriver(_Driver):
         #                 (a remotely-controlled transport domain might exist between them)
         #    topology --> imports sub-devices and links connecting them.
         #                 (not supported by XR driver)
-        self.__import_topology = get_import_topology(settings, default=ImportTopologyEnum.DISABLED)
+        self.__import_topology = get_import_topology(self.settings, default=ImportTopologyEnum.DISABLED)
 
         # Options are:
         #    asynchronous --> operation considered complete when IPM responds with suitable status code,
@@ -64,12 +66,12 @@ class XrDriver(_Driver):
         #    lifecycle    --> operation is considered successfull once IPM has completed pluggaable configuration
         #                     or failed in it. This is typically unsuitable for production use
         #                     (as some optics may be transiently unreachable), but is convenient for demos and testin.
-        consistency_mode = ConsistencyMode.from_str(settings.get("consistency-mode", "asynchronous"))
+        consistency_mode = ConsistencyMode.from_str(self.settings.get("consistency-mode", "asynchronous"))
 
-        self.__cm_connection = CmConnection(address, int(port), username, password, self.__timeout, tls_verify = tls_verify, consistency_mode=consistency_mode)
+        self.__cm_connection = CmConnection(self.address, int(self.port), username, password, self.__timeout, tls_verify = tls_verify, consistency_mode=consistency_mode)
         self.__constellation = None
 
-        LOGGER.info(f"XrDriver instantiated, cm {address}:{port}, consistency mode {str(consistency_mode)}, {settings=}")
+        LOGGER.info(f"XrDriver instantiated, cm {self.address}:{self.port}, consistency mode {str(consistency_mode)}, {self.settings=}")
 
     def __str__(self):
         return f"{self.__hub_module_name}@{self.__cm_address}"
diff --git a/src/load_generator/command/__main__.py b/src/load_generator/command/__main__.py
index a97f081a32269ff824733b9a2a69be21bfb2004f..4fa2094e0fdc94b9665b2cfc86811e67809bcb5f 100644
--- a/src/load_generator/command/__main__.py
+++ b/src/load_generator/command/__main__.py
@@ -34,6 +34,8 @@ def main():
             RequestType.SLICE_L2NM,
             RequestType.SLICE_L3NM,
         ],
+        device_regex=r'.+',
+        endpoint_regex=r'.+',
         offered_load  = 50,
         holding_time  = 10,
         availability_ranges   = [[0.0, 99.9999]],
diff --git a/src/load_generator/load_gen/Parameters.py b/src/load_generator/load_gen/Parameters.py
index aca40cd3854fad203f15ce9b07a79715e9ea46f6..5bb7a9b725f955a4186a21201e439f9cfaa71324 100644
--- a/src/load_generator/load_gen/Parameters.py
+++ b/src/load_generator/load_gen/Parameters.py
@@ -20,16 +20,27 @@ from load_generator.tools.ListScalarRange import Type_ListScalarRange
 
 class Parameters:
     def __init__(
-        self, num_requests : int, request_types : List[str], offered_load : Optional[float] = None,
-        inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None,
+        self,
+        num_requests : int,
+        request_types : List[str],
+        device_regex : Optional[str] = None,
+        endpoint_regex : Optional[str] = None,
+        offered_load : Optional[float] = None,
+        inter_arrival_time : Optional[float] = None,
+        holding_time : Optional[float] = None,
         availability_ranges : Type_ListScalarRange = DEFAULT_AVAILABILITY_RANGES,
         capacity_gbps_ranges : Type_ListScalarRange = DEFAULT_CAPACITY_GBPS_RANGES,
         e2e_latency_ms_ranges : Type_ListScalarRange = DEFAULT_E2E_LATENCY_MS_RANGES,
-        max_workers : int = DEFAULT_MAX_WORKERS, do_teardown : bool = True, dry_mode : bool = False,
-        record_to_dlt : bool = False, dlt_domain_id : Optional[str] = None
+        max_workers : int = DEFAULT_MAX_WORKERS,
+        do_teardown : bool = True,
+        dry_mode : bool = False,
+        record_to_dlt : bool = False,
+        dlt_domain_id : Optional[str] = None
     ) -> None:
         self._num_requests = num_requests
         self._request_types = request_types
+        self._device_regex = r'.*' if (device_regex is None or len(device_regex) == 0) else device_regex
+        self._endpoint_regex = r'.*' if (endpoint_regex is None or len(endpoint_regex) == 0) else endpoint_regex
         self._offered_load = offered_load
         self._inter_arrival_time = inter_arrival_time
         self._holding_time = holding_time
@@ -62,6 +73,12 @@ class Parameters:
     @property
     def request_types(self): return self._request_types
 
+    @property
+    def device_regex(self): return self._device_regex
+
+    @property
+    def endpoint_regex(self): return self._endpoint_regex
+
     @property
     def offered_load(self): return self._offered_load
 
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
index 3a52b3b322bfefe60e7c5c8d3eed585b92b40353..fdd400a2110fd4a75d6f9e8cc4820bc943eef423 100644
--- a/src/load_generator/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -39,14 +39,6 @@ ROUTER_ID = {
     'R149': '5.5.5.5',
     'R155': '5.5.5.1',
     'R199': '5.5.5.6',
-
-}
-
-VIRTUAL_CIRCUIT = {
-    'R149': '5.5.5.5',
-    'R155': '5.5.5.1',
-    'R199': '5.5.5.6',
-
 }
 
 class RequestGenerator:
@@ -83,13 +75,21 @@ class RequestGenerator:
             if self._parameters.record_to_dlt:
                 dlt_domain_id = TopologyId(**json_topology_id('dlt-perf-eval'))
 
+            re_device = re.compile(r'^{:s}$'.format(self._parameters.device_regex))
+            re_endpoint = re.compile(r'^{:s}$'.format(self._parameters.endpoint_regex))
+
             devices = context_client.ListDevices(Empty())
             for device in devices.devices:
+                if self._parameters.record_to_dlt:
+                    record_device_to_dlt(dlt_connector_client, dlt_domain_id, device.device_id)
+
+                if re_device.match(device.name) is None: continue
                 device_uuid = device.device_id.device_uuid.uuid
                 self._device_data[device_uuid] = grpc_message_to_json(device)
 
                 _endpoints = self._available_device_endpoints.setdefault(device_uuid, set())
                 for endpoint in device.device_endpoints:
+                    if re_endpoint.match(endpoint.name) is None: continue
                     endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
                     endpoints = self._device_endpoint_data.setdefault(device_uuid, dict())
                     endpoints[endpoint_uuid] = grpc_message_to_json(endpoint)
@@ -98,12 +98,12 @@ class RequestGenerator:
                     _endpoints.add(endpoint_uuid)
                     self._endpoint_ids_to_types.setdefault((device_uuid, endpoint_uuid), endpoint_type)
                     self._endpoint_types_to_ids.setdefault(endpoint_type, set()).add((device_uuid, endpoint_uuid))
-                
-                if self._parameters.record_to_dlt:
-                    record_device_to_dlt(dlt_connector_client, dlt_domain_id, device.device_id)
 
             links = context_client.ListLinks(Empty())
             for link in links.links:
+                if self._parameters.record_to_dlt:
+                    record_link_to_dlt(dlt_connector_client, dlt_domain_id, link.link_id)
+
                 for endpoint_id in link.link_endpoint_ids:
                     device_uuid = endpoint_id.device_id.device_uuid.uuid
                     endpoint_uuid = endpoint_id.endpoint_uuid.uuid
@@ -119,9 +119,6 @@ class RequestGenerator:
                     endpoint_key = (device_uuid, endpoint_uuid)
                     if endpoint_key not in endpoints_for_type: continue
                     endpoints_for_type.discard(endpoint_key)
-            
-                    if self._parameters.record_to_dlt:
-                        record_link_to_dlt(dlt_connector_client, dlt_domain_id, link.link_id)
 
     def dump_state(self) -> None:
         with self._lock:
@@ -264,8 +261,8 @@ class RequestGenerator:
 
             src_device_name = self._device_data[src_device_uuid]['name']
             src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
-            src_router_id = ROUTER_ID.get(src_device_name)
             src_router_num = int(re.findall(r'^\D*(\d+)', src_device_name)[0])
+            src_router_id = ROUTER_ID.get(src_device_name)
             if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num)
 
             dst_device_name = self._device_data[dst_device_uuid]['name']
@@ -317,8 +314,8 @@ class RequestGenerator:
 
             src_device_name = self._device_data[src_device_uuid]['name']
             src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
-            src_router_id = ROUTER_ID.get(src_device_name)
             src_router_num = int(re.findall(r'^\D*(\d+)', src_device_name)[0])
+            src_router_id = ROUTER_ID.get(src_device_name)
             if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num)
             src_address_ip = '10.{:d}.{:d}.{:d}'.format(x, y, src_router_num)
 
diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
index 9f12f34920fda69ba55963876e96f51a8256537c..866f9f089662598b08c8dd03d04b01fd63108f5a 100644
--- a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
+++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
@@ -37,6 +37,8 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
         self._parameters = LoadGen_Parameters(
             num_requests          = request.num_requests,
             request_types         = [REQUEST_TYPE_MAP[rt] for rt in request.request_types],
+            device_regex          = request.device_regex,
+            endpoint_regex        = request.endpoint_regex,
             offered_load          = request.offered_load if request.offered_load > 1.e-12 else None,
             holding_time          = request.holding_time if request.holding_time > 1.e-12 else None,
             inter_arrival_time    = request.inter_arrival_time if request.inter_arrival_time > 1.e-12 else None,
@@ -79,6 +81,8 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
 
         stat_pars = status.parameters                               # pylint: disable=no-member
         stat_pars.num_requests       = params.num_requests          # pylint: disable=no-member
+        stat_pars.device_regex       = params.device_regex          # pylint: disable=no-member
+        stat_pars.endpoint_regex     = params.endpoint_regex        # pylint: disable=no-member
         stat_pars.offered_load       = params.offered_load          # pylint: disable=no-member
         stat_pars.holding_time       = params.holding_time          # pylint: disable=no-member
         stat_pars.inter_arrival_time = params.inter_arrival_time    # pylint: disable=no-member
diff --git a/src/monitoring/service/MetricsDBTools.py b/src/monitoring/service/MetricsDBTools.py
index f928f07b94c71fb6f378161862e96d41af8bde7f..11574e8f6577db0bab4add96da8157496d40e6f5 100644
--- a/src/monitoring/service/MetricsDBTools.py
+++ b/src/monitoring/service/MetricsDBTools.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import time
+import math
 from random import random
 
 from questdb.ingress import Sender, IngressError
@@ -326,4 +327,4 @@ class MetricsDB():
             else:
                 LOGGER.debug(f"No new data for the alarm of KPI {kpi_id}")
         except (Exception) as e:
-            LOGGER.debug(f"Alarm data cannot be retrieved. {e}")
\ No newline at end of file
+            LOGGER.debug(f"Alarm data cannot be retrieved. {e}")
diff --git a/src/pathcomp/backend/pathComp_tools.h b/src/pathcomp/backend/pathComp_tools.h
index cac66f81c561502a6d93249f5e44a6195cb0f61b..84334eb5e1d47199e8a71bb09c3b541625d66af2 100644
--- a/src/pathcomp/backend/pathComp_tools.h
+++ b/src/pathcomp/backend/pathComp_tools.h
@@ -124,7 +124,7 @@ struct map_nodes_t {
     gint numMapNodes;
 };
 
-#define MAX_NUM_VERTICES				20 // 100 # LGR: reduced from 100 to 20 to divide by 5 the memory used
+#define MAX_NUM_VERTICES				100 // LGR: restored from 20 back to 100; the earlier memory-saving reduction is no longer needed
 #define MAX_NUM_EDGES					5 // 100 # LGR: reduced from 100 to 5 to divide by 20 the memory used
 // Structures for the graph composition
 struct targetNodes_t {
@@ -249,7 +249,7 @@ struct endPoint_t {
 // Structure for the device contents
 ///////////////////////////////////////////////////////////////////
 #define MAX_DEV_TYPE_SIZE				128
-#define MAX_DEV_ENDPOINT_LENGTH			50	// 10 # LGR: controllers might have large number of endpoints
+#define MAX_DEV_ENDPOINT_LENGTH			100	// LGR: increased from 10 to 100; controllers might have large number of endpoints
 struct device_t {
 	gdouble power_idle; // power idle (baseline) of the switch in Watts
 	gint operational_status; // 0 - Undefined, 1 - Disabled, 2 - Enabled
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py
index 40cb0857617983df4cfd926baebcbff85e169894..8ffdfaf3ed9d35b52e9c262a980e6e8e8fd234af 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py
@@ -67,6 +67,7 @@ def convert_explicit_path_hops_to_connections(
     prv_res_class : Tuple[Optional[int], Optional[DeviceTypeEnum], Optional[str]] = None, None, None
 
     for path_hop in path_hops:
+        LOGGER.debug('path_hop={:s}'.format(str(path_hop)))
         device_uuid = path_hop['device']
         if prv_device_uuid == device_uuid: continue
         device_tuple = device_dict.get(device_uuid)
@@ -74,24 +75,33 @@ def convert_explicit_path_hops_to_connections(
         _,grpc_device = device_tuple
 
         res_class = get_resource_classification(grpc_device, device_dict)
-        if res_class[1] in IGNORED_DEVICE_TYPES: continue
+        LOGGER.debug('  prv_res_class={:s}'.format(str(prv_res_class)))
+        LOGGER.debug('  res_class={:s}'.format(str(res_class)))
+        if res_class[1] in IGNORED_DEVICE_TYPES:
+            LOGGER.debug('  ignored')
+            continue
 
         if prv_res_class[0] is None:
             # path ingress
+            LOGGER.debug('  path ingress')
             connection_stack.put((main_service_uuid, main_service_type, [path_hop], []))
         elif prv_res_class[0] > res_class[0]:
             # create underlying connection
+            LOGGER.debug('  create underlying connection')
             connection_uuid = str(uuid.uuid4())
             prv_service_type = connection_stack.queue[-1][1]
             service_type = get_service_type(res_class[1], prv_service_type)
             connection_stack.put((connection_uuid, service_type, [path_hop], []))
         elif prv_res_class[0] == res_class[0]:
             # same resource group kind
+            LOGGER.debug('  same resource group kind')
             if prv_res_class[1] == res_class[1] and prv_res_class[2] == res_class[2]:
                 # same device type and device controller: connection continues
+                LOGGER.debug('  connection continues')
                 connection_stack.queue[-1][2].append(path_hop)
             else:
                 # different device type or device controller: chain connections
+                LOGGER.debug('  chain connections')
                 connection = connection_stack.get()
                 connections.append(connection)
                 connection_stack.queue[-1][3].append(connection[0])
@@ -102,6 +112,7 @@ def convert_explicit_path_hops_to_connections(
                 connection_stack.put((connection_uuid, service_type, [path_hop], []))
         elif prv_res_class[0] < res_class[0]:
             # underlying connection ended
+            LOGGER.debug('  underlying connection ended')
             connection = connection_stack.get()
             connections.append(connection)
             connection_stack.queue[-1][3].append(connection[0])
@@ -113,6 +124,7 @@ def convert_explicit_path_hops_to_connections(
         prv_res_class = res_class
 
     # path egress
+    LOGGER.debug('  path egress')
     connections.append(connection_stack.get())
     LOGGER.debug('connections={:s}'.format(str(connections)))
     assert connection_stack.empty()
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
index 53c89cd124cb7d3431b37a50596b0b793cfa83eb..e56d436dd006197497d7774be598a480a134320c 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
@@ -33,12 +33,12 @@ DEVICE_TYPE_TO_DEEPNESS = {
     DeviceTypeEnum.EMULATED_P4_SWITCH.value              : 60,
     DeviceTypeEnum.P4_SWITCH.value                       : 60,
 
-    DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 40,
-    DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value          : 40,
-
     DeviceTypeEnum.EMULATED_XR_CONSTELLATION.value       : 40,
     DeviceTypeEnum.XR_CONSTELLATION.value                : 40,
 
+    DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 30,
+    DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value          : 30,
+
     DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value       : 30,
     DeviceTypeEnum.OPEN_LINE_SYSTEM.value                : 30,
 
diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py
index 96751e83770e1b98df4770cf74bb453f6a0519ef..acda45ce80a62a4a3723744546968e3195799b59 100644
--- a/src/service/service/task_scheduler/TaskExecutor.py
+++ b/src/service/service/task_scheduler/TaskExecutor.py
@@ -12,23 +12,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
+import json, logging
 from enum import Enum
 from typing import TYPE_CHECKING, Any, Dict, Optional, Union
 from common.method_wrappers.ServiceExceptions import NotFoundException
-from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId
+from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceDriverEnum, DeviceId, Service, ServiceId
 from common.tools.context_queries.Connection import get_connection_by_id
 from common.tools.context_queries.Device import get_device
 from common.tools.context_queries.Service import get_service_by_id
+from common.tools.grpc.Tools import grpc_message_list_to_json_string
 from common.tools.object_factory.Device import json_device_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
+from service.service.service_handler_api.Exceptions import (
+    UnsatisfiedFilterException, UnsupportedFilterFieldException, UnsupportedFilterFieldValueException)
 from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory, get_service_handler_class
 from service.service.tools.ObjectKeys import get_connection_key, get_device_key, get_service_key
 
 if TYPE_CHECKING:
     from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 
+LOGGER = logging.getLogger(__name__)
+
 CacheableObject = Union[Connection, Device, Service]
 
 class CacheableObjectType(Enum):
@@ -169,5 +174,21 @@ class TaskExecutor:
         self, connection : Connection, service : Service, **service_handler_settings
     ) -> '_ServiceHandler':
         connection_devices = self.get_devices_from_connection(connection, exclude_managed_by_controller=True)
-        service_handler_class = get_service_handler_class(self._service_handler_factory, service, connection_devices)
-        return service_handler_class(service, self, **service_handler_settings)
+        try:
+            service_handler_class = get_service_handler_class(
+                self._service_handler_factory, service, connection_devices)
+            return service_handler_class(service, self, **service_handler_settings)
+        except (UnsatisfiedFilterException, UnsupportedFilterFieldException, UnsupportedFilterFieldValueException):
+            dict_connection_devices = {
+                cd_data.name : (cd_uuid, cd_data.name, {
+                    (device_driver, DeviceDriverEnum.Name(device_driver))
+                    for device_driver in cd_data.device_drivers
+                })
+                for cd_uuid,cd_data in connection_devices.items()
+            }
+            LOGGER.exception(
+                'Unable to select service handler. service={:s} connection={:s} connection_devices={:s}'.format(
+                    grpc_message_list_to_json_string(service), grpc_message_list_to_json_string(connection),
+                    str(dict_connection_devices)
+                )
+            )
diff --git a/src/tests/p4/setup.sh b/src/tests/p4/setup.sh
index 78e7f7372d911623cd541495ab15ad3cd548c3ef..a98ad31ab159217c209d5077b258fd398d5113cd 100755
--- a/src/tests/p4/setup.sh
+++ b/src/tests/p4/setup.sh
@@ -16,7 +16,7 @@
 
 export POD_NAME=$(kubectl get pods -n=tfs | grep device | awk '{print $1}')
 
-kubectl exec ${POD_NAME} -n=tfs -- mkdir /root/p4
+kubectl exec ${POD_NAME} -n=tfs -c=server -- mkdir /root/p4
 
-kubectl cp src/tests/p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4
-kubectl cp src/tests/p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4
+kubectl cp src/tests/p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4 -c=server
+kubectl cp src/tests/p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4 -c=server
diff --git a/src/tests/tools/perf_plots/Component_RPC_Methods.py b/src/tests/tools/perf_plots/Component_RPC_Methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..7aa3ed304bc7d923a5ba634917fd95c28aea513b
--- /dev/null
+++ b/src/tests/tools/perf_plots/Component_RPC_Methods.py
@@ -0,0 +1,123 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, re
+from typing import Dict, List, Optional, Tuple
+from .tools.FileSystem import create_folders
+from .tools.HistogramData import HistogramData
+from .tools.Plotter import plot_histogram
+from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
+from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms
+
+##### EXPERIMENT SETTINGS ##############################################################################################
+
+EXPERIMENT_NAME = 'L2VPN with Emulated'
+EXPERIMENT_ID   = 'l2vpn-emu'
+TIME_START      = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_END        = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_STEP       = '1m'
+LABEL_FILTERS   = {}
+
+##### ENVIRONMENT SETTINGS #############################################################################################
+
+PROM_ADDRESS = '127.0.0.1'
+PROM_PORT    = 9090
+OUT_FOLDER   = 'data/perf/'
+
+##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
+
+EXPERIMENT_ID  += '/component-rpcs'
+SERIES_MATCH   = 'tfs_.+_rpc_.+_histogram_duration_bucket'
+RE_SERIES_NAME = re.compile(r'^tfs_(.+)_rpc_(.+)_histogram_duration_bucket$')
+SERIES_LABELS  = []
+
+SUBSYSTEMS_MAPPING = {
+    'context': {
+        'context'   : 'context',
+        'topolog'   : 'topology',
+        'device'    : 'device',
+        'endpoint'  : 'device',
+        'link'      : 'link',
+        'service'   : 'service',
+        'slice'     : 'slice',
+        'policyrule': 'policyrule',
+        'connection': 'connection',
+    }
+}
+
+def get_subsystem(component : str, rpc_method : str) -> Optional[str]:
+    return next(iter([
+        subsystem
+        for pattern,subsystem in SUBSYSTEMS_MAPPING.get(component, {}).items()
+        if pattern in rpc_method
+    ]), None)
+
+def update_keys(component : str, rpc_method : str) -> Tuple[Tuple, Tuple]:
+    subsystem = get_subsystem(component, rpc_method)
+    collection_keys = (component, subsystem)
+    histogram_keys = (rpc_method,)
+    return collection_keys, histogram_keys
+
+def get_plot_specs(folders : Dict[str, str], component : str, subsystem : Optional[str]) -> Tuple[str, str]:
+    if subsystem is None:
+        title = '{:s} - RPC Methods [{:s}]'.format(component.title(), EXPERIMENT_NAME)
+        filepath = '{:s}/{:s}.png'.format(folders['png'], component)
+    else:
+        title = '{:s} - RPC Methods - {:s} [{:s}]'.format(component.title(), subsystem.title(), EXPERIMENT_NAME)
+        filepath = '{:s}/{:s}-{:s}.png'.format(folders['png'], component, subsystem)
+    return title, filepath
+
+##### AUTOMATED CODE ###################################################################################################
+
+def get_series_names(folders : Dict[str, str]) -> List[str]:
+    series_names = get_prometheus_series_names(
+        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
+        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
+    )
+    return series_names
+
+def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
+    m = RE_SERIES_NAME.match(series_name)
+    if m is None:
+        # pylint: disable=broad-exception-raised
+        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
+    extra_labels = m.groups()
+    results = get_prometheus_range(
+        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
+        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
+    )
+    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
+    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
+    save_histograms(histograms, folders['csv'])
+    return histograms
+
+def main() -> None:
+    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
+
+    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
+    series_names = get_series_names(folders)
+
+    for series_name in series_names:
+        histograms = get_histogram_data(series_name, folders)
+        for histogram_keys, histogram_data in histograms.items():
+            collection_keys,histogram_keys = update_keys(*histogram_keys)
+            histograms = histograms_collection.setdefault(collection_keys, dict())
+            histograms[histogram_keys] = histogram_data
+
+    for histogram_keys,histograms in histograms_collection.items():
+        title, filepath = get_plot_specs(folders, *histogram_keys)
+        plot_histogram(histograms, filepath, title=title)
+
+if __name__ == '__main__':
+    main()
diff --git a/src/tests/tools/perf_plots/Device_Driver_Details.py b/src/tests/tools/perf_plots/Device_Driver_Details.py
new file mode 100644
index 0000000000000000000000000000000000000000..24b287cc826872f48acc8c24c4f51ecd7ba8c676
--- /dev/null
+++ b/src/tests/tools/perf_plots/Device_Driver_Details.py
@@ -0,0 +1,101 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, re
+from typing import Dict, List, Optional, Tuple
+from .tools.FileSystem import create_folders
+from .tools.HistogramData import HistogramData
+from .tools.Plotter import plot_histogram
+from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
+from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms
+
+##### EXPERIMENT SETTINGS ##############################################################################################
+
+EXPERIMENT_NAME = 'L2VPN with Emulated'
+EXPERIMENT_ID   = 'l2vpn-emu'
+TIME_START      = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_END        = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_STEP       = '1m'
+LABEL_FILTERS   = {
+    #'driver': 'emulated',
+    #'operation': 'configure_device', # add_device / configure_device
+    #'step': 'get_device',
+}
+
+##### ENVIRONMENT SETTINGS #############################################################################################
+
+PROM_ADDRESS = '127.0.0.1'
+PROM_PORT    = 9090
+OUT_FOLDER   = 'data/perf/'
+
+##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
+
+EXPERIMENT_ID  += '/dev-drv-details'
+SERIES_MATCH   = 'tfs_device_execution_details_histogram_duration_bucket'
+RE_SERIES_NAME = re.compile(r'^tfs_device_execution_details_histogram_duration_bucket$')
+SERIES_LABELS  = ['driver', 'operation', 'step']
+
+def update_keys(driver : str, operation : str, step : str) -> Tuple[Tuple, Tuple]:
+    collection_keys = (driver, operation)
+    histogram_keys = (step,)
+    return collection_keys, histogram_keys
+
+def get_plot_specs(folders : Dict[str, str], driver : str, operation : str) -> Tuple[str, str]:
+    title = 'Device Driver - {:s} - {:s}'.format(driver.title(), operation.replace('_', ' ').title())
+    filepath = '{:s}/{:s}-{:s}.png'.format(folders['png'], driver, operation)
+    return title, filepath
+
+##### AUTOMATED CODE ###################################################################################################
+
+def get_series_names(folders : Dict[str, str]) -> List[str]:
+    series_names = get_prometheus_series_names(
+        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
+        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
+    )
+    return series_names
+
+def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
+    m = RE_SERIES_NAME.match(series_name)
+    if m is None:
+        # pylint: disable=broad-exception-raised
+        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
+    extra_labels = m.groups()
+    results = get_prometheus_range(
+        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
+        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
+    )
+    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
+    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
+    save_histograms(histograms, folders['csv'])
+    return histograms
+
+def main() -> None:
+    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
+
+    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
+    series_names = get_series_names(folders)
+
+    for series_name in series_names:
+        histograms = get_histogram_data(series_name, folders)
+        for histogram_keys, histogram_data in histograms.items():
+            collection_keys,histogram_keys = update_keys(*histogram_keys)
+            histograms = histograms_collection.setdefault(collection_keys, dict())
+            histograms[histogram_keys] = histogram_data
+
+    for histogram_keys,histograms in histograms_collection.items():
+        title, filepath = get_plot_specs(folders, *histogram_keys)
+        plot_histogram(histograms, filepath, title=title)
+
+if __name__ == '__main__':
+    main()
diff --git a/src/tests/tools/perf_plots/Device_Driver_Methods.py b/src/tests/tools/perf_plots/Device_Driver_Methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..a92bd13747f6c7c6aa861c989e9f1199ef3870d0
--- /dev/null
+++ b/src/tests/tools/perf_plots/Device_Driver_Methods.py
@@ -0,0 +1,99 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, re
+from typing import Dict, List, Tuple
+from .tools.FileSystem import create_folders
+from .tools.HistogramData import HistogramData
+from .tools.Plotter import plot_histogram
+from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
+from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms
+
+##### EXPERIMENT SETTINGS ##############################################################################################
+
+EXPERIMENT_NAME = 'L2VPN with Emulated'
+EXPERIMENT_ID   = 'l2vpn-emu'
+TIME_START      = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_END        = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_STEP       = '1m'
+LABEL_FILTERS   = {
+    #'driver': 'emulated',
+}
+
+##### ENVIRONMENT SETTINGS #############################################################################################
+
+PROM_ADDRESS = '127.0.0.1'
+PROM_PORT    = 9090
+OUT_FOLDER   = 'data/perf/'
+
+##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
+
+EXPERIMENT_ID  += '/dev-drv-methods'
+SERIES_MATCH   = 'tfs_device_driver_.+_histogram_duration_bucket'
+RE_SERIES_NAME = re.compile(r'^tfs_device_driver_(.+)_histogram_duration_bucket$')
+SERIES_LABELS  = ['driver']
+
+def update_keys(driver : str, method : str) -> Tuple[Tuple, Tuple]:
+    collection_keys = (driver,)
+    histogram_keys = (method,)
+    return collection_keys, histogram_keys
+
+def get_plot_specs(folders : Dict[str, str], driver : str) -> Tuple[str, str]:
+    title = 'Device Driver - {:s}'.format(driver.title())
+    filepath = '{:s}/{:s}.png'.format(folders['png'], driver)
+    return title, filepath
+
+##### AUTOMATED CODE ###################################################################################################
+
+def get_series_names(folders : Dict[str, str]) -> List[str]:
+    series_names = get_prometheus_series_names(
+        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
+        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
+    )
+    return series_names
+
+def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
+    m = RE_SERIES_NAME.match(series_name)
+    if m is None:
+        # pylint: disable=broad-exception-raised
+        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
+    extra_labels = m.groups()
+    results = get_prometheus_range(
+        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
+        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
+    )
+    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
+    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
+    save_histograms(histograms, folders['csv'])
+    return histograms
+
+def main() -> None:
+    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
+
+    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
+    series_names = get_series_names(folders)
+
+    for series_name in series_names:
+        histograms = get_histogram_data(series_name, folders)
+        for histogram_keys, histogram_data in histograms.items():
+            collection_keys,histogram_keys = update_keys(*histogram_keys)
+            histograms = histograms_collection.setdefault(collection_keys, dict())
+            histograms[histogram_keys] = histogram_data
+
+    for histogram_keys,histograms in histograms_collection.items():
+        title, filepath = get_plot_specs(folders, *histogram_keys)
+        plot_histogram(histograms, filepath, title=title)
+
+if __name__ == '__main__':
+    main()
diff --git a/src/tests/tools/perf_plots/README.md b/src/tests/tools/perf_plots/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..14dcb1c9508a1b63c62ae84aef4abe3e17589ef7
--- /dev/null
+++ b/src/tests/tools/perf_plots/README.md
@@ -0,0 +1,29 @@
+# Tool: Perf Plots Generator
+
+Simple tool to gather performance data from Prometheus and produce histogram plots.
+
+## Example:
+
+- Ensure your MicroK8s includes the monitoring addon and your deployment specifies the service monitors.
+
+- Deploy TeraFlowSDN controller with your specific settings:
+```bash
+cd ~/tfs-ctrl
+source my_deploy.sh 
+./deploy.sh 
+```
+
+- Execute the test you want to meter.
+
+- Select the appropriate script:
+    - Device_Driver_Methods   : To report Device Driver Methods
+    - Device_Driver_Details   : To report Device Add/Configure Details
+    - Service_Handler_Methods : To report Service Handler Methods
+    - Component_RPC_Methods   : To report Component RPC Methods
+
+- Tune the experiment settings
+
+- Execute the report script:
+```bash
+PYTHONPATH=./src python -m tests.tools.perf_plots.<script>
+```
diff --git a/src/tests/tools/perf_plots/Service_Handler_Methods.py b/src/tests/tools/perf_plots/Service_Handler_Methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..a57757274d518450672ccec5a08ef6afb4c527be
--- /dev/null
+++ b/src/tests/tools/perf_plots/Service_Handler_Methods.py
@@ -0,0 +1,99 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, re
+from typing import Dict, List, Tuple
+from .tools.FileSystem import create_folders
+from .tools.HistogramData import HistogramData
+from .tools.Plotter import plot_histogram
+from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
+from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms
+
+##### EXPERIMENT SETTINGS ##############################################################################################
+
+EXPERIMENT_NAME = 'L2VPN with Emulated'
+EXPERIMENT_ID   = 'l2vpn-emu'
+TIME_START      = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_END        = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
+TIME_STEP       = '1m'
+LABEL_FILTERS   = {
+    #'handler': 'l2nm_emulated',
+}
+
+##### ENVIRONMENT SETTINGS #############################################################################################
+
+PROM_ADDRESS = '127.0.0.1'
+PROM_PORT    = 9090
+OUT_FOLDER   = 'data/perf/'
+
+##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
+
+EXPERIMENT_ID  += '/svc-hdl-methods'
+SERIES_MATCH   = 'tfs_service_handler_.+_histogram_duration_bucket'
+RE_SERIES_NAME = re.compile(r'^tfs_service_handler_(.+)_histogram_duration_bucket$')
+SERIES_LABELS  = ['handler']
+
+def update_keys(handler : str, method : str) -> Tuple[Tuple, Tuple]:
+    collection_keys = (handler,)
+    histogram_keys = (method,)
+    return collection_keys, histogram_keys
+
+def get_plot_specs(folders : Dict[str, str], handler : str) -> Tuple[str, str]:
+    title = 'Service Handler - {:s}'.format(handler.title())
+    filepath = '{:s}/{:s}.png'.format(folders['png'], handler)
+    return title, filepath
+
+##### AUTOMATED CODE ###################################################################################################
+
+def get_series_names(folders : Dict[str, str]) -> List[str]:
+    series_names = get_prometheus_series_names(
+        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
+        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
+    )
+    return series_names
+
+def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
+    m = RE_SERIES_NAME.match(series_name)
+    if m is None:
+        # pylint: disable=broad-exception-raised
+        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
+    extra_labels = m.groups()
+    results = get_prometheus_range(
+        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
+        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
+    )
+    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
+    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
+    save_histograms(histograms, folders['csv'])
+    return histograms
+
+def main() -> None:
+    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
+
+    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
+    series_names = get_series_names(folders)
+
+    for series_name in series_names:
+        histograms = get_histogram_data(series_name, folders)
+        for histogram_keys, histogram_data in histograms.items():
+            collection_keys,histogram_keys = update_keys(*histogram_keys)
+            histograms = histograms_collection.setdefault(collection_keys, dict())
+            histograms[histogram_keys] = histogram_data
+
+    for histogram_keys,histograms in histograms_collection.items():
+        title, filepath = get_plot_specs(folders, *histogram_keys)
+        plot_histogram(histograms, filepath, title=title)
+
+if __name__ == '__main__':
+    main()
diff --git a/src/webui/service/policy/__init__.py b/src/tests/tools/perf_plots/__init__.py
similarity index 100%
rename from src/webui/service/policy/__init__.py
rename to src/tests/tools/perf_plots/__init__.py
diff --git a/src/tests/tools/perf_plots/tools/FileSystem.py b/src/tests/tools/perf_plots/tools/FileSystem.py
new file mode 100644
index 0000000000000000000000000000000000000000..3af5dbc910be35b548a11d7a00ee79e604aa0927
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/FileSystem.py
@@ -0,0 +1,27 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pathlib
+from typing import Dict
+
+def create_folders(root_folder : str, experiment_id : str) -> Dict[str, str]:
+    experiment_folder = root_folder + '/' + experiment_id
+    folders = {
+        'csv'  : experiment_folder + '/csv' ,
+        'json' : experiment_folder + '/json',
+        'png'  : experiment_folder + '/png' ,
+    }
+    for folder in folders.values():
+        pathlib.Path(folder).mkdir(parents=True, exist_ok=True)
+    return folders
diff --git a/src/tests/tools/perf_plots/tools/Histogram.py b/src/tests/tools/perf_plots/tools/Histogram.py
new file mode 100644
index 0000000000000000000000000000000000000000..0380b5bd21804cfc468a1f9bd19565337f76f741
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/Histogram.py
@@ -0,0 +1,88 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import csv
+from typing import Dict, List, Tuple
+from .HistogramData import HistogramData
+
+def results_to_histograms(
+    results : List[Dict], key_labels : List[str], extra_labels : List[str] = []
+) -> Dict[Tuple, HistogramData]:
+    histograms : Dict[Tuple, HistogramData] = dict()
+    for result in results:
+        metric : Dict = result['metric']
+        labels = [metric[l] for l in key_labels]
+        if len(extra_labels) > 0: labels.extend(extra_labels)
+        histogram_key = tuple(labels)
+        histogram = histograms.get(histogram_key)
+        if histogram is None:
+            histogram = histograms.setdefault(
+                histogram_key, HistogramData(timestamps=set(), bins=set(), data=dict()))
+        bin_ = float(metric['le'])
+        histogram.bins.add(bin_)
+    
+        values : List[Tuple[int, str]] = result['values']
+        for timestamp,count in values:
+            histogram.timestamps.add(timestamp)
+            histogram.data.setdefault(timestamp, dict())[bin_] = int(count)
+    return histograms
+
+def unaccumulate_histogram(
+    histogram : HistogramData, process_bins : bool = True, process_timestamps : bool = True
+) -> None:
+    timestamps = sorted(histogram.timestamps)
+    bins = sorted(histogram.bins)
+    accumulated_over_time = {b:0 for b in bins}
+    for timestamp in timestamps:
+        bin_to_count = histogram.data.get(timestamp)
+        if bin_to_count is None: continue
+
+        accumulated_over_bins = 0
+        for bin_ in bins:
+            count = bin_to_count[bin_]
+
+            if process_bins:
+                count -= accumulated_over_bins
+                accumulated_over_bins += count
+
+            if process_timestamps:
+                count -= accumulated_over_time[bin_]
+                accumulated_over_time[bin_] += count
+
+            bin_to_count[bin_] = count
+
+def unaccumulate_histograms(
+    histograms : Dict[Tuple, HistogramData], process_bins : bool = True, process_timestamps : bool = True
+) -> None:
+    for histogram in histograms.values():
+        unaccumulate_histogram(histogram, process_bins=process_bins, process_timestamps=process_timestamps)
+
+def save_histogram(filepath : str, histogram : HistogramData) -> None:
+    timestamps = sorted(histogram.timestamps)
+    bins = sorted(histogram.bins)
+    header = [''] + [str(b) for b in bins]
+    with open(filepath, 'w', encoding='UTF-8') as f:
+        writer = csv.writer(f)
+        writer.writerow(header)
+        for timestamp in timestamps:
+            bin_to_count = histogram.data.get(timestamp, {})
+            writer.writerow([timestamp] + [
+                str(bin_to_count.get(bin_, 0))
+                for bin_ in bins
+            ])
+
+def save_histograms(histograms : Dict[Tuple, HistogramData], data_folder : str) -> None:
+    for histogram_keys, histogram_data in histograms.items():
+        filepath = '{:s}/{:s}.csv'.format(data_folder, '__'.join(histogram_keys))
+        save_histogram(filepath, histogram_data)
diff --git a/src/tests/tools/perf_plots/tools/HistogramData.py b/src/tests/tools/perf_plots/tools/HistogramData.py
new file mode 100644
index 0000000000000000000000000000000000000000..7469853c636b089b9dff8473b34fb7ee6913d1aa
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/HistogramData.py
@@ -0,0 +1,22 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+from typing import Dict, Set
+
+@dataclass
+class HistogramData:
+    timestamps : Set[int]
+    bins       : Set[float]
+    data       : Dict[int, Dict[float, int]]
diff --git a/src/tests/tools/perf_plots/tools/Plotter.py b/src/tests/tools/perf_plots/tools/Plotter.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0ce92a7f91198bcac9692a55f3c122792b16b84
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/Plotter.py
@@ -0,0 +1,59 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import matplotlib.pyplot as plt
+from typing import Dict, Optional, Tuple
+from .HistogramData import HistogramData
+
+def plot_histogram(
+    histograms : Dict[Tuple, HistogramData], filepath : str,
+    title : Optional[str] = None, label_separator : str = ' ', dpi : int = 600,
+    legend_loc : str = 'best', grid : bool = True
+) -> None:
+
+    # plot the cumulative histogram
+    _, ax = plt.subplots(figsize=(8, 8))
+
+    num_series = 0
+    for histogram_keys, histogram_data in histograms.items():
+        bins = sorted(histogram_data.bins)
+
+        last_timestamp = max(histogram_data.timestamps)
+        counts = histogram_data.data.get(last_timestamp)
+        counts = [int(counts[bin_]) for bin_ in bins]
+        if sum(counts) == 0: continue
+        num_series += 1
+
+        bins.insert(0, 0)
+        bins = np.array(bins).astype(float)
+        counts = np.array(counts).astype(float)
+
+        assert len(bins) == len(counts) + 1
+        centroids = (bins[1:] + bins[:-1]) / 2
+
+        label = label_separator.join(histogram_keys)
+        ax.hist(centroids, bins=bins, weights=counts, range=(min(bins), max(bins)), density=True,
+                histtype='step', cumulative=True, label=label)
+
+    if num_series == 0: return
+
+    ax.grid(grid)
+    ax.legend(loc=legend_loc)
+    if title is not None: ax.set_title(title)
+    ax.set_xlabel('seconds')
+    ax.set_ylabel('Likelihood of occurrence')
+    plt.xscale('log')
+    plt.savefig(filepath, dpi=(dpi))
+    plt.show()
diff --git a/src/tests/tools/perf_plots/tools/Prometheus.py b/src/tests/tools/perf_plots/tools/Prometheus.py
new file mode 100644
index 0000000000000000000000000000000000000000..60a06b202e5b9710bb814974360fd6d8e4580cc4
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/Prometheus.py
@@ -0,0 +1,59 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, requests, time
+from datetime import datetime
+from typing import Dict, List, Optional
+
+def get_prometheus_series_names(
+    address : str, port : int, metric_match : str, time_start : datetime, time_end : datetime, timeout : int = 10,
+    raw_json_filepath : Optional[str] = None
+) -> List[str]:
+    str_url = 'http://{:s}:{:d}/api/v1/label/__name__/values'.format(address, port)
+    params = {
+        'match[]': '{{__name__=~"{:s}"}}'.format(metric_match),
+        'start': time.mktime(time_start.timetuple()),
+        'end'  : time.mktime(time_end.timetuple()),
+    }
+    response = requests.get(str_url, params=params, timeout=timeout)
+    results = response.json()
+    if raw_json_filepath is not None:
+        with open(raw_json_filepath, 'w', encoding='UTF-8') as f:
+            f.write(json.dumps(results, sort_keys=True))
+    assert results['status'] == 'success'
+    return results['data']
+
+def get_prometheus_range(
+    address : str, port : int, metric_name : str, labels : Dict[str, str], time_start : datetime, time_end : datetime,
+    time_step : str, timeout : int = 10, raw_json_filepath : Optional[str] = None
+) -> List[Dict]:
+    str_url = 'http://{:s}:{:d}/api/v1/query_range'.format(address, port)
+    str_query = metric_name
+    if len(labels) > 0:
+        str_labels = ', '.join(['{:s}="{:s}"'.format(name, value) for name,value in labels.items()])
+        str_query += '{{{:s}}}'.format(str_labels)
+    params = {
+        'query': str_query,
+        'start': time.mktime(time_start.timetuple()),
+        'end'  : time.mktime(time_end.timetuple()),
+        'step' : time_step,
+    }
+    response = requests.get(str_url, params=params, timeout=timeout)
+    results = response.json()
+    if raw_json_filepath is not None:
+        with open(raw_json_filepath, 'w', encoding='UTF-8') as f:
+            f.write(json.dumps(results, sort_keys=True))
+    assert results['status'] == 'success'
+    assert results['data']['resultType'] == 'matrix'
+    return results['data']['result']
diff --git a/src/tests/tools/perf_plots/tools/__init__.py b/src/tests/tools/perf_plots/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/tests/tools/perf_plots/tools/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/webui/grafana_prom_device_exec_details.json b/src/webui/grafana_prom_device_exec_details.json
new file mode 100644
index 0000000000000000000000000000000000000000..a18c2b91cabd942dd5908ff3e6b3966528e62fa0
--- /dev/null
+++ b/src/webui/grafana_prom_device_exec_details.json
@@ -0,0 +1,258 @@
+{"overwrite": true, "folderId": 0, "dashboard":
+  {
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": {
+            "type": "datasource",
+            "uid": "grafana"
+          },
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "target": {
+            "limit": 100,
+            "matchAny": false,
+            "tags": [],
+            "type": "dashboard"
+          },
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "fiscalYearStartMonth": 0,
+    "graphTooltip": 0,
+    "id": null,
+    "iteration": 1683036452435,
+    "links": [],
+    "liveNow": false,
+    "panels": [
+      {
+        "cards": {},
+        "color": {
+          "cardColor": "#b4ff00",
+          "colorScale": "linear",
+          "colorScheme": "interpolateRdYlGn",
+          "exponent": 0.5,
+          "min": 0,
+          "mode": "opacity"
+        },
+        "dataFormat": "tsbuckets",
+        "datasource": {
+          "type": "prometheus",
+          "uid": "prometheus"
+        },
+        "gridPos": {
+          "h": 22,
+          "w": 24,
+          "x": 0,
+          "y": 0
+        },
+        "heatmap": {},
+        "hideZeroBuckets": true,
+        "highlightCards": true,
+        "id": 2,
+        "interval": "60s",
+        "legend": {
+          "show": true
+        },
+        "pluginVersion": "7.5.4",
+        "reverseYBuckets": false,
+        "targets": [
+          {
+            "datasource": {
+              "type": "prometheus",
+              "uid": "prometheus"
+            },
+            "editorMode": "code",
+            "exemplar": true,
+            "expr": "sum(\r\n    max_over_time(tfs_device_execution_details_histogram_duration_bucket{driver=~\"[[driver]]\", operation=~\"[[operation]]\", step=~\"[[step]]\"}[1m]) -\r\n    min_over_time(tfs_device_execution_details_histogram_duration_bucket{driver=~\"[[driver]]\", operation=~\"[[operation]]\", step=~\"[[step]]\"}[1m])\r\n) by (le)",
+            "format": "heatmap",
+            "instant": false,
+            "interval": "1m",
+            "intervalFactor": 1,
+            "legendFormat": "{{le}}",
+            "range": true,
+            "refId": "A"
+          }
+        ],
+        "title": "Histogram",
+        "tooltip": {
+          "show": true,
+          "showHistogram": true
+        },
+        "type": "heatmap",
+        "xAxis": {
+          "show": true
+        },
+        "yAxis": {
+          "format": "s",
+          "logBase": 1,
+          "show": true
+        },
+        "yBucketBound": "auto"
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 36,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": {
+            "type": "prometheus",
+            "uid": "prometheus"
+          },
+          "definition": "label_values(tfs_device_execution_details_histogram_duration_bucket, operation)",
+          "hide": 0,
+          "includeAll": true,
+          "label": "Operation",
+          "multi": true,
+          "name": "operation",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_device_execution_details_histogram_duration_bucket, operation)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": {
+            "type": "prometheus",
+            "uid": "prometheus"
+          },
+          "definition": "label_values(tfs_device_execution_details_histogram_duration_bucket, driver)",
+          "hide": 0,
+          "includeAll": true,
+          "label": "Driver",
+          "multi": true,
+          "name": "driver",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_device_execution_details_histogram_duration_bucket, driver)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": {
+            "type": "prometheus",
+            "uid": "prometheus"
+          },
+          "definition": "label_values(tfs_device_execution_details_histogram_duration_bucket, step)",
+          "hide": 0,
+          "includeAll": true,
+          "label": "Step",
+          "multi": true,
+          "name": "step",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_device_execution_details_histogram_duration_bucket, step)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": {
+            "type": "prometheus",
+            "uid": "prometheus"
+          },
+          "definition": "label_values(tfs_device_execution_details_histogram_duration_bucket, pod)",
+          "hide": 0,
+          "includeAll": true,
+          "label": "Pod",
+          "multi": true,
+          "name": "pod",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_device_execution_details_histogram_duration_bucket, pod)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        }
+      ]
+    },
+    "time": {
+      "from": "now-15m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "TFS / Device Execution Details",
+    "uid": "tfs-dev-exec",
+    "version": 4,
+    "weekStart": ""
+  }
+}
diff --git a/src/webui/grafana_prom_device_config_exec_details.json b/src/webui/old/grafana_prom_device_config_exec_details.json
similarity index 100%
rename from src/webui/grafana_prom_device_config_exec_details.json
rename to src/webui/old/grafana_prom_device_config_exec_details.json
diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py
index e7f50ed42d19921b3423617f6860b5630e93adba..3c64f45c90457e1b6a9553e60634879a28910a31 100644
--- a/src/webui/service/__init__.py
+++ b/src/webui/service/__init__.py
@@ -95,8 +95,8 @@ def create_app(use_config=None, web_app_root=None):
     from webui.service.link.routes import link              # pylint: disable=import-outside-toplevel
     app.register_blueprint(link)
 
-    from webui.service.policy.routes import policy          # pylint: disable=import-outside-toplevel
-    app.register_blueprint(policy)
+    from webui.service.policy_rule.routes import policy_rule # pylint: disable=import-outside-toplevel
+    app.register_blueprint(policy_rule)
 
     app.jinja_env.globals.update({              # pylint: disable=no-member
         'enumerate'           : enumerate,
diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py
index 0fda8958e2ab2609969d2c1f68aaae61b7360b68..3ab320d8b13d037e195f00ced9eb63bd14ecc0dd 100644
--- a/src/webui/service/link/routes.py
+++ b/src/webui/service/link/routes.py
@@ -13,8 +13,8 @@
 # limitations under the License.
 
 
-from flask import render_template, Blueprint, flash, session, redirect, url_for
-from common.proto.context_pb2 import Empty, Link, LinkList
+from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for
+from common.proto.context_pb2 import Empty, Link, LinkId, LinkList
 from common.tools.context_queries.EndPoint import get_endpoint_names
 from common.tools.context_queries.Link import get_link
 from common.tools.context_queries.Topology import get_topology
@@ -65,3 +65,25 @@ def detail(link_uuid: str):
         device_names, endpoints_data = get_endpoint_names(context_client, link_obj.link_endpoint_ids)
     context_client.close()
     return render_template('link/detail.html',link=link_obj, device_names=device_names, endpoints_data=endpoints_data)
+
+@link.get('<path:link_uuid>/delete')
+def delete(link_uuid):
+    try:
+
+        # first, check if link exists!
+        # request: LinkId = LinkId()
+        # request.link_uuid.uuid = link_uuid
+        # response: Link = client.GetLink(request)
+        # TODO: finalize implementation
+
+        request = LinkId()
+        request.link_uuid.uuid = link_uuid # pylint: disable=no-member
+        context_client.connect()
+        context_client.RemoveLink(request)
+        context_client.close()
+
+        flash(f'Link "{link_uuid}" deleted successfully!', 'success')
+    except Exception as e: # pylint: disable=broad-except
+        flash(f'Problem deleting link "{link_uuid}": {e.details()}', 'danger')
+        current_app.logger.exception(e)
+    return redirect(url_for('link.home'))
diff --git a/src/webui/service/load_gen/forms.py b/src/webui/service/load_gen/forms.py
index e0d11800cf9fbd9b0e195de7aa85eede272fe28e..4c5c095cd0ca7b0549a14be394e517694d9b3268 100644
--- a/src/webui/service/load_gen/forms.py
+++ b/src/webui/service/load_gen/forms.py
@@ -21,6 +21,8 @@ DEFAULT_AVAILABILITY   = '0.0..99.9999'
 DEFAULT_CAPACITY_GBPS  = '0.1..100.00' #'10, 40, 50, 100, 400'
 DEFAULT_E2E_LATENCY_MS = '5.0..100.00'
 
+DEFAULT_REGEX = r'.+'
+
 class LoadGenForm(FlaskForm):
     num_requests = IntegerField('Num Requests', default=100, validators=[DataRequired(), NumberRange(min=0)])
     num_generated = IntegerField('Num Generated', default=0, render_kw={'readonly': True})
@@ -33,6 +35,9 @@ class LoadGenForm(FlaskForm):
     request_type_slice_l2nm = BooleanField('Slice L2NM', default=True)
     request_type_slice_l3nm = BooleanField('Slice L3NM', default=False)
 
+    device_regex = StringField('Device selector [regex]', default=DEFAULT_REGEX)
+    endpoint_regex = StringField('Endpoint selector [regex]', default=DEFAULT_REGEX)
+
     offered_load = FloatField('Offered Load [Erlang]', default=50, validators=[NumberRange(min=0.0)])
     holding_time = FloatField('Holding Time [seconds]', default=10, validators=[NumberRange(min=0.0)])
     inter_arrival_time = FloatField('Inter Arrival Time [seconds]', default=0, validators=[NumberRange(min=0.0)])
diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py
index f05f57f6d5aab83c0752dda15e0b858c9a4d53a3..3483c2a65d08f3da18b2f630dbf7a59ac0f22ecb 100644
--- a/src/webui/service/load_gen/routes.py
+++ b/src/webui/service/load_gen/routes.py
@@ -62,27 +62,29 @@ def home():
     _e2e_latency_ms     = list_scalar_range__grpc_to_str(status.parameters.e2e_latency_ms)
 
     form = LoadGenForm()
-    set_properties(form.num_requests             , status.parameters.num_requests , readonly=status.running)
-    set_properties(form.offered_load             , _offered_load                  , readonly=status.running)
-    set_properties(form.holding_time             , _holding_time                  , readonly=status.running)
-    set_properties(form.inter_arrival_time       , _inter_arrival_time            , readonly=status.running)
-    set_properties(form.availability             , _availability                  , readonly=status.running)
-    set_properties(form.capacity_gbps            , _capacity_gbps                 , readonly=status.running)
-    set_properties(form.e2e_latency_ms           , _e2e_latency_ms                , readonly=status.running)
-    set_properties(form.max_workers              , status.parameters.max_workers  , readonly=status.running)
-    set_properties(form.do_teardown              , status.parameters.do_teardown  , disabled=status.running)
-    set_properties(form.record_to_dlt            , status.parameters.record_to_dlt, disabled=status.running)
-    set_properties(form.dlt_domain_id            , status.parameters.dlt_domain_id, readonly=status.running)
-    set_properties(form.request_type_service_l2nm, _request_type_service_l2nm     , disabled=status.running)
-    set_properties(form.request_type_service_l3nm, _request_type_service_l3nm     , disabled=status.running)
-    set_properties(form.request_type_service_mw  , _request_type_service_mw       , disabled=status.running)
-    set_properties(form.request_type_service_tapi, _request_type_service_tapi     , disabled=status.running)
-    set_properties(form.request_type_slice_l2nm  , _request_type_slice_l2nm       , disabled=status.running)
-    set_properties(form.request_type_slice_l3nm  , _request_type_slice_l3nm       , disabled=status.running)
-    set_properties(form.num_generated            , status.num_generated           , disabled=True)
-    set_properties(form.num_released             , status.num_released            , disabled=True)
-    set_properties(form.infinite_loop            , status.infinite_loop           , disabled=True)
-    set_properties(form.running                  , status.running                 , disabled=True)
+    set_properties(form.num_requests             , status.parameters.num_requests  , readonly=status.running)
+    set_properties(form.device_regex             , status.parameters.device_regex  , readonly=status.running)
+    set_properties(form.endpoint_regex           , status.parameters.endpoint_regex, readonly=status.running)
+    set_properties(form.offered_load             , _offered_load                   , readonly=status.running)
+    set_properties(form.holding_time             , _holding_time                   , readonly=status.running)
+    set_properties(form.inter_arrival_time       , _inter_arrival_time             , readonly=status.running)
+    set_properties(form.availability             , _availability                   , readonly=status.running)
+    set_properties(form.capacity_gbps            , _capacity_gbps                  , readonly=status.running)
+    set_properties(form.e2e_latency_ms           , _e2e_latency_ms                 , readonly=status.running)
+    set_properties(form.max_workers              , status.parameters.max_workers   , readonly=status.running)
+    set_properties(form.do_teardown              , status.parameters.do_teardown   , disabled=status.running)
+    set_properties(form.record_to_dlt            , status.parameters.record_to_dlt , disabled=status.running)
+    set_properties(form.dlt_domain_id            , status.parameters.dlt_domain_id , readonly=status.running)
+    set_properties(form.request_type_service_l2nm, _request_type_service_l2nm      , disabled=status.running)
+    set_properties(form.request_type_service_l3nm, _request_type_service_l3nm      , disabled=status.running)
+    set_properties(form.request_type_service_mw  , _request_type_service_mw        , disabled=status.running)
+    set_properties(form.request_type_service_tapi, _request_type_service_tapi      , disabled=status.running)
+    set_properties(form.request_type_slice_l2nm  , _request_type_slice_l2nm        , disabled=status.running)
+    set_properties(form.request_type_slice_l3nm  , _request_type_slice_l3nm        , disabled=status.running)
+    set_properties(form.num_generated            , status.num_generated            , disabled=True)
+    set_properties(form.num_released             , status.num_released             , disabled=True)
+    set_properties(form.infinite_loop            , status.infinite_loop            , disabled=True)
+    set_properties(form.running                  , status.running                  , disabled=True)
 
     form.submit.label.text = 'Stop' if status.running else 'Start'
     form_action = url_for('load_gen.stop') if status.running else url_for('load_gen.start')
@@ -99,6 +101,8 @@ def start():
 
             load_gen_params = Parameters()
             load_gen_params.num_requests       = form.num_requests.data
+            load_gen_params.device_regex       = form.device_regex.data
+            load_gen_params.endpoint_regex     = form.endpoint_regex.data
             load_gen_params.offered_load       = form.offered_load.data
             load_gen_params.holding_time       = form.holding_time.data
             load_gen_params.inter_arrival_time = form.inter_arrival_time.data
diff --git a/src/webui/service/policy_rule/__init__.py b/src/webui/service/policy_rule/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/webui/service/policy_rule/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/webui/service/policy/routes.py b/src/webui/service/policy_rule/routes.py
similarity index 62%
rename from src/webui/service/policy/routes.py
rename to src/webui/service/policy_rule/routes.py
index 6d14f86b4f1428695b474b3f2e2dd4dc72657452..5a99cf8b2f6ce71c571c9d31a2118a5390ee7d15 100644
--- a/src/webui/service/policy/routes.py
+++ b/src/webui/service/policy_rule/routes.py
@@ -18,33 +18,33 @@ from common.proto.context_pb2 import Empty
 from common.proto.policy_pb2 import PolicyRuleStateEnum
 from context.client.ContextClient import ContextClient
 
-policy = Blueprint('policy', __name__, url_prefix='/policy')
+policy_rule = Blueprint('policy_rule', __name__, url_prefix='/policy_rule')
 
 context_client = ContextClient()
 
-@policy.get('/')
+@policy_rule.get('/')
 def home():
     context_client.connect()
     policy_rules = context_client.ListPolicyRules(Empty())
     policy_rules = policy_rules.policyRules
     context_client.close()
-    return render_template('policy/home.html', policy_rules=policy_rules, prse=PolicyRuleStateEnum)
+    return render_template('policy_rule/home.html', policy_rules=policy_rules, prse=PolicyRuleStateEnum)
 
-#@policy.get('<path:policy_uuid>/detail')
-#def detail(policy_uuid: str):
+#@policy_rule.get('<path:policy_rule_uuid>/detail')
+#def detail(policy_rule_uuid: str):
 #    try:
 #        context_client.connect()
 #
-#        slice_obj = get_slice_by_uuid(context_client, slice_uuid, rw_copy=False)
-#        if slice_obj is None:
-#            flash('Context({:s})/Slice({:s}) not found'.format(str(context_uuid), str(slice_uuid)), 'danger')
-#            slice_obj = Slice()
+#        policy_rule_obj = get_policy_rule_by_uuid(context_client, policy_rule_uuid, rw_copy=False)
+#        if policy_rule_obj is None:
+#            flash('Context({:s})/PolicyRule({:s}) not found'.format(str(context_uuid), str(policy_rule_uuid)), 'danger')
+#            policy_rule_obj = PolicyRule()
 #
 #        context_client.close()
 #
 #        return render_template(
-#            'slice/detail.html', slice=slice_obj, prse=PolicyRuleStateEnum)
+#            'policy_rule/detail.html', policy_rule=policy_rule_obj, prse=PolicyRuleStateEnum)
 #    except Exception as e:
-#        flash('The system encountered an error and cannot show the details of this slice.', 'warning')
+#        flash('The system encountered an error and cannot show the details of this policy_rule.', 'warning')
 #        current_app.logger.exception(e)
-#        return redirect(url_for('slice.home'))
+#        return redirect(url_for('policy_rule.home'))
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index 61c283b0d957b4d13b7cc57e47d3ea2675ab76f0..4c31b61935aca2bd7d2a5e7642168afdea6fd02d 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -84,10 +84,10 @@
                   {% endif %}
                 </li>
                 <li class="nav-item">
-                  {% if '/policy/' in request.path %}
-                  <a class="nav-link active" aria-current="page" href="{{ url_for('policy.home') }}">Policy</a>
+                  {% if '/policy_rule/' in request.path %}
+                  <a class="nav-link active" aria-current="page" href="{{ url_for('policy_rule.home') }}">Policy Rules</a>
                   {% else %}
-                  <a class="nav-link" href="{{ url_for('policy.home') }}">Policy</a>
+                  <a class="nav-link" href="{{ url_for('policy_rule.home') }}">Policy Rules</a>
                   {% endif %}
                 </li>
                 <li class="nav-item">
@@ -177,4 +177,4 @@
       <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.2/dist/js/bootstrap.min.js" integrity="sha384-PsUw7Xwds7x08Ew3exXhqzbhuEYmA2xnwc8BuD6SEr+UmEHlX8/MCltYEodzWA4u" crossorigin="anonymous"></script>
       -->
     </body>
-  </html>
\ No newline at end of file
+  </html>
diff --git a/src/webui/service/templates/link/detail.html b/src/webui/service/templates/link/detail.html
index 916abafde05b3ec990346ff7966f207b1dafc10a..8ca7faee3e1871d11b819c6ca95668e654041f8c 100644
--- a/src/webui/service/templates/link/detail.html
+++ b/src/webui/service/templates/link/detail.html
@@ -13,62 +13,92 @@
     See the License for the specific language governing permissions and
     limitations under the License.
    -->
-   {% extends 'base.html' %}
-   
-   {% block content %}
-    <h1>Link {{ link.name }} ({{ link.link_id.link_uuid.uuid }})</h1>
-    <div class="row mb-3">
-          <div class="col-sm-3">
-               <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('link.home') }}'">
-                    <i class="bi bi-box-arrow-in-left"></i>
-                    Back to link list
-               </button>
-          </div>
-     </div>
 
-     <br>
-       <div class="row mb-3">
-            <div class="col-sm-4">
-               <b>UUID: </b>{{ link.link_id.link_uuid.uuid }}<br>
-               <b>Name: </b>{{ link.name }}<br>
-           </div>
-            <div class="col-sm-8">
-                    <table class="table table-striped table-hover">
-                        <thead>
-                            <tr>
-                                <th scope="col">Endpoint UUID</th>
-                                <th scope="col">Name</th>
-                                <th scope="col">Device</th>
-                                <th scope="col">Endpoint Type</th>
-                            </tr>
-                        </thead>
-                        <tbody>
-                              {% for endpoint in link.link_endpoint_ids %}
-                              <tr>
-                                   <td>
-                                        {{ endpoint.endpoint_uuid.uuid }}
-                                   </td>
-                                   <td>
-                                        {{ endpoints_data.get(endpoint.endpoint_uuid.uuid, (endpoint.endpoint_uuid.uuid, ''))[0] }}
-                                   </td>
-                                   <td>
-                                        <a href="{{ url_for('device.detail', device_uuid=endpoint.device_id.device_uuid.uuid) }}">
-                                             {{ device_names.get(endpoint.device_id.device_uuid.uuid, endpoint.device_id.device_uuid.uuid) }}
-                                             <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
-                                                 <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
-                                                 <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
-                                             </svg>
-                                        </a>
-                                   </td>
-                                   <td>
-                                        {{ endpoints_data.get(endpoint.endpoint_uuid.uuid, ('', '-'))[1] }}
-                                   </td>
-                              </tr>
-                              {% endfor %}
-                        </tbody>
-                    </table>
+{% extends 'base.html' %}
+
+{% block content %}
+<h1>Link {{ link.name }} ({{ link.link_id.link_uuid.uuid }})</h1>
+<div class="row mb-3">
+    <div class="col-sm-3">
+        <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('link.home') }}'">
+            <i class="bi bi-box-arrow-in-left"></i>
+            Back to link list
+        </button>
+    </div>
+    <div class="col-sm-3">
+        <!-- <button type="button" class="btn btn-danger"><i class="bi bi-x-square"></i>Delete link</button> -->
+        <button type="button" class="btn btn-danger" data-bs-toggle="modal" data-bs-target="#deleteModal">
+            <i class="bi bi-x-square"></i>
+            Delete link
+        </button>
+    </div>
+</div>
+
+<br>
+<div class="row mb-3">
+    <div class="col-sm-4">
+        <b>UUID: </b>{{ link.link_id.link_uuid.uuid }}<br>
+        <b>Name: </b>{{ link.name }}<br>
+    </div>
+    <div class="col-sm-8">
+        <table class="table table-striped table-hover">
+            <thead>
+                <tr>
+                    <th scope="col">Endpoint UUID</th>
+                    <th scope="col">Name</th>
+                    <th scope="col">Device</th>
+                    <th scope="col">Endpoint Type</th>
+                </tr>
+            </thead>
+            <tbody>
+                {% for endpoint in link.link_endpoint_ids %}
+                <tr>
+                    <td>
+                        {{ endpoint.endpoint_uuid.uuid }}
+                    </td>
+                    <td>
+                        {{ endpoints_data.get(endpoint.endpoint_uuid.uuid, (endpoint.endpoint_uuid.uuid, ''))[0] }}
+                    </td>
+                    <td>
+                        <a href="{{ url_for('device.detail', device_uuid=endpoint.device_id.device_uuid.uuid) }}">
+                            {{ device_names.get(endpoint.device_id.device_uuid.uuid, endpoint.device_id.device_uuid.uuid) }}
+                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                    <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                    <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                            </svg>
+                        </a>
+                    </td>
+                    <td>
+                        {{ endpoints_data.get(endpoint.endpoint_uuid.uuid, ('', '-'))[1] }}
+                    </td>
+                </tr>
+                {% endfor %}
+            </tbody>
+        </table>
+    </div>
+</div>
+
+
+<!-- Modal -->
+<div class="modal fade" id="deleteModal" data-bs-backdrop="static" data-bs-keyboard="false" tabindex="-1"
+    aria-labelledby="staticBackdropLabel" aria-hidden="true">
+    <div class="modal-dialog">
+        <div class="modal-content">
+            <div class="modal-header">
+                <h5 class="modal-title" id="staticBackdropLabel">Delete link?</h5>
+                <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
+            </div>
+            <div class="modal-body">
+                Are you sure you want to delete the link "{{ link.link_id.link_uuid.uuid }}"?
+            </div>
+            <div class="modal-footer">
+                <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">No</button>
+                <a type="button" class="btn btn-danger"
+                    href="{{ url_for('link.delete', link_uuid=link.link_id.link_uuid.uuid) }}"><i
+                        class="bi bi-exclamation-diamond"></i>Yes</a>
             </div>
         </div>
+    </div>
+</div>
 
-   {% endblock %}
-   
\ No newline at end of file
+{% endblock %}
diff --git a/src/webui/service/templates/load_gen/home.html b/src/webui/service/templates/load_gen/home.html
index 5bedf66fad1fa2d1b5e38e3866acd95347c9559b..cec0a38dba2b4b31d4d07d391e4ce211f0c7ac76 100644
--- a/src/webui/service/templates/load_gen/home.html
+++ b/src/webui/service/templates/load_gen/home.html
@@ -83,6 +83,36 @@
             </div>
             <br />
 
+            <div class="row mb-3">
+                {{ form.device_regex.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.device_regex.errors %}
+                        {{ form.device_regex(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.device_regex.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.device_regex(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.endpoint_regex.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.endpoint_regex.errors %}
+                        {{ form.endpoint_regex(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.endpoint_regex.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.endpoint_regex(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
             <div class="row mb-3">
                 {{ form.offered_load.label(class="col-sm-2 col-form-label") }}
                 <div class="col-sm-10">
diff --git a/src/webui/service/templates/policy/home.html b/src/webui/service/templates/policy/home.html
deleted file mode 100644
index 081a7f0b5291346633a2f682ba4552b5c1e362fb..0000000000000000000000000000000000000000
--- a/src/webui/service/templates/policy/home.html
+++ /dev/null
@@ -1,84 +0,0 @@
-<!--
- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-{% extends 'base.html' %}
-
-{% block content %}
-    <h1>Policy</h1>
-
-    <div class="row">
-        <div class="col">
-            {{ policies | length }} policies found in context <i>{{ session['context_uuid'] }}</i>
-        </div>
-    </div>
-
-    <table class="table table-striped table-hover">
-        <thead>
-          <tr>
-            <th scope="col">UUID</th>
-            <th scope="col">Kind</th>
-            <th scope="col">Priority</th>
-            <th scope="col">Condition</th>
-            <th scope="col">Operator</th>
-            <th scope="col">Action</th>
-            <th scope="col">Service</th>
-            <th scope="col">Devices</th>
-            <th scope="col">State</th>
-            <th scope="col">Message</th>
-            <th scope="col">Extra</th>
-            <th scope="col"></th>
-          </tr>
-        </thead>
-        <tbody>
-            {% if policies %}
-                {% for policy in policies %}
-                    {% if policy.WhichOneof('policy_rule') == 'device' %}
-                        <tr>
-                            <td>{{ policy.device.policyRuleBasic.policyRuleId.uuid }}</td>
-                            <td>{{ policy.WhichOneof('policy_rule') }}</td>
-                            <td>{{ policy.device.policyRuleBasic.priority }}</td>
-                            <td>{{ policy.device.policyRuleBasic.conditionList }}</td>
-                            <td>{{ policy.device.policyRuleBasic.booleanOperator }}</td>
-                            <td>{{ policy.device.policyRuleBasic.actionList }}</td>
-                            <td>-</td>
-                            <td>{{ policy.device.deviceList }}</td>
-                            <td>{{ prse.Name(policy.device.policyRuleBasic.policyRuleState.policyRuleState).replace('POLICY_', '') }}</td>
-                            <td>{{ policy.device.policyRuleBasic.policyRuleState.policyRuleStateMessage }}</td>
-                        </tr>
-                    {% elif policy.WhichOneof('policy_rule') == 'service' %}
-                        <tr>
-                            <td>{{ policy.service.policyRuleBasic.policyRuleId.uuid }}</td>
-                            <td>{{ policy.WhichOneof('policy_rule') }}</td>
-                            <td>{{ policy.service.policyRuleBasic.priority }}</td>
-                            <td>{{ policy.service.policyRuleBasic.conditionList }}</td>
-                            <td>{{ policy.service.policyRuleBasic.booleanOperator }}</td>
-                            <td>{{ policy.service.policyRuleBasic.actionList }}</td>
-                            <td>{{ policy.service.serviceId }}</td>
-                            <td>{{ policy.service.deviceList }}</td>
-                            <td>{{ prse.Name(policy.service.policyRuleBasic.policyRuleState.policyRuleState).replace('POLICY_', '') }}</td>
-                            <td>{{ policy.service.policyRuleBasic.policyRuleState.policyRuleStateMessage }}</td>
-                        </tr>
-                    {% else %}
-                        <tr><td colspan="11">Unsupported policy type {{ policy.WhichOneof('policy_rule') }}</td></tr>
-                    {% endif %}
-                {% endfor %}
-            {% else %}
-                <tr><td colspan="11">No policies found</td></tr>
-            {% endif %}
-        </tbody>
-    </table>
-
-{% endblock %}
diff --git a/src/webui/service/templates/policy_rule/home.html b/src/webui/service/templates/policy_rule/home.html
new file mode 100644
index 0000000000000000000000000000000000000000..c63807a6aad046d8312a07bbb412c541c5e06bc8
--- /dev/null
+++ b/src/webui/service/templates/policy_rule/home.html
@@ -0,0 +1,84 @@
+<!--
+ Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+{% extends 'base.html' %}
+
+{% block content %}
+    <h1>Policy Rules</h1>
+
+    <div class="row">
+        <div class="col">
+            {{ policy_rules | length }} policy rules found in context <i>{{ session['context_uuid'] }}</i>
+        </div>
+    </div>
+
+    <table class="table table-striped table-hover">
+        <thead>
+          <tr>
+            <th scope="col">UUID</th>
+            <th scope="col">Kind</th>
+            <th scope="col">Priority</th>
+            <th scope="col">Condition</th>
+            <th scope="col">Operator</th>
+            <th scope="col">Action</th>
+            <th scope="col">Service</th>
+            <th scope="col">Devices</th>
+            <th scope="col">State</th>
+            <th scope="col">Message</th>
+            <th scope="col">Extra</th>
+            <th scope="col"></th>
+          </tr>
+        </thead>
+        <tbody>
+            {% if policy_rules %}
+                {% for policy_rule in policy_rules %}
+                    {% if policy_rule.WhichOneof('policy_rule') == 'device' %}
+                        <tr>
+                            <td>{{ policy_rule.device.policyRuleBasic.policyRuleId.uuid }}</td>
+                            <td>{{ policy_rule.WhichOneof('policy_rule') }}</td>
+                            <td>{{ policy_rule.device.policyRuleBasic.priority }}</td>
+                            <td>{{ policy_rule.device.policyRuleBasic.conditionList }}</td>
+                            <td>{{ policy_rule.device.policyRuleBasic.booleanOperator }}</td>
+                            <td>{{ policy_rule.device.policyRuleBasic.actionList }}</td>
+                            <td>-</td>
+                            <td>{{ policy_rule.device.deviceList }}</td>
+                            <td>{{ prse.Name(policy_rule.device.policyRuleBasic.policyRuleState.policyRuleState).replace('POLICY_', '') }}</td>
+                            <td>{{ policy_rule.device.policyRuleBasic.policyRuleState.policyRuleStateMessage }}</td><td>-</td><td></td>
+                        </tr>
+                    {% elif policy_rule.WhichOneof('policy_rule') == 'service' %}
+                        <tr>
+                            <td>{{ policy_rule.service.policyRuleBasic.policyRuleId.uuid }}</td>
+                            <td>{{ policy_rule.WhichOneof('policy_rule') }}</td>
+                            <td>{{ policy_rule.service.policyRuleBasic.priority }}</td>
+                            <td>{{ policy_rule.service.policyRuleBasic.conditionList }}</td>
+                            <td>{{ policy_rule.service.policyRuleBasic.booleanOperator }}</td>
+                            <td>{{ policy_rule.service.policyRuleBasic.actionList }}</td>
+                            <td>{{ policy_rule.service.serviceId }}</td>
+                            <td>{{ policy_rule.service.deviceList }}</td>
+                            <td>{{ prse.Name(policy_rule.service.policyRuleBasic.policyRuleState.policyRuleState).replace('POLICY_', '') }}</td>
+                            <td>{{ policy_rule.service.policyRuleBasic.policyRuleState.policyRuleStateMessage }}</td><td>-</td><td></td>
+                        </tr>
+                    {% else %}
+                        <tr><td colspan="12">Unsupported policy rule type {{ policy_rule.WhichOneof('policy_rule') }}</td></tr>
+                    {% endif %}
+                {% endfor %}
+            {% else %}
+                <tr><td colspan="12">No policy rule found</td></tr>
+            {% endif %}
+        </tbody>
+    </table>
+
+{% endblock %}