diff --git a/hackfest/p4/setup.sh b/hackfest/p4/setup.sh
index 07fe22e6aea2341c50462010b4bfb55c4a657a47..195327a03fedafdc64a2d0dc34577766eda72a4f 100755
--- a/hackfest/p4/setup.sh
+++ b/hackfest/p4/setup.sh
@@ -4,5 +4,5 @@ export POD_NAME=$(kubectl get pods -n=tfs | grep device | awk '{print $1}')
 
 kubectl exec ${POD_NAME} -n=tfs -- mkdir /root/p4
 
-kubectl cp src/tests/netx22-p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4
-kubectl cp src/tests/netx22-p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4
+kubectl cp hackfest/p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4
+kubectl cp hackfest/p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4
diff --git a/hackfest/p4/tests/Objects.py b/hackfest/p4/tests/Objects.py
index 09b3aced843a198b7c963a34492a4fe2379c9123..c8b172244d714cd699ccc587e54c3751485a9a2e 100644
--- a/hackfest/p4/tests/Objects.py
+++ b/hackfest/p4/tests/Objects.py
@@ -1,4 +1,5 @@
 # Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -42,6 +43,8 @@ PACKET_PORT_SAMPLE_TYPES = [
     KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED,
 ]
 
+# ----- Device Credentials and Settings --------------------------------------------------------------------------------
+
 
 # ----- Devices --------------------------------------------------------------------------------------------------------
 
@@ -54,7 +57,7 @@ DEVICE_SW1                  = json_device_p4_disabled(DEVICE_SW1_UUID)
 
 DEVICE_SW1_DPID             = 1
 DEVICE_SW1_NAME             = DEVICE_SW1_UUID
-DEVICE_SW1_IP_ADDR          = '10.0.2.10'
+DEVICE_SW1_IP_ADDR          = 'localhost'
 DEVICE_SW1_PORT             = '50001'
 DEVICE_SW1_VENDOR           = 'Open Networking Foundation'
 DEVICE_SW1_HW_VER           = 'BMv2 simple_switch'
@@ -78,9 +81,38 @@ DEVICE_SW1_CONNECT_RULES    = json_device_connect_rules(
     }
 )
 
+DEVICE_SW2_UUID             = 'SW2'
+DEVICE_SW2_TIMEOUT          = 60
+DEVICE_SW2_ID               = json_device_id(DEVICE_SW2_UUID)
+DEVICE_SW2                  = json_device_p4_disabled(DEVICE_SW2_UUID)
 
-################################## TABLE ENTRIES ##################################
+DEVICE_SW2_DPID             = 1
+DEVICE_SW2_NAME             = DEVICE_SW2_UUID
+DEVICE_SW2_IP_ADDR          = 'localhost'
+DEVICE_SW2_PORT             = '50002'
+DEVICE_SW2_VENDOR           = 'Open Networking Foundation'
+DEVICE_SW2_HW_VER           = 'BMv2 simple_switch'
+DEVICE_SW2_SW_VER           = 'Stratum'
 
+DEVICE_SW2_BIN_PATH         = '/root/p4/bmv2.json'
+DEVICE_SW2_INFO_PATH        = '/root/p4/p4info.txt'
+
+DEVICE_SW2_CONNECT_RULES    = json_device_connect_rules(
+    DEVICE_SW2_IP_ADDR,
+    DEVICE_SW2_PORT,
+    {
+        'id':       DEVICE_SW2_DPID,
+        'name':     DEVICE_SW2_NAME,
+        'vendor':   DEVICE_SW2_VENDOR,
+        'hw_ver':   DEVICE_SW2_HW_VER,
+        'sw_ver':   DEVICE_SW2_SW_VER,
+        'timeout':  DEVICE_SW2_TIMEOUT,
+        'p4bin':    DEVICE_SW2_BIN_PATH,
+        'p4info':   DEVICE_SW2_INFO_PATH
+    }
+)
+
+################################## TABLE ENTRIES ##################################
 
 DEVICE_SW1_CONFIG_TABLE_ENTRIES = [
     json_config_rule_set(
@@ -123,6 +155,8 @@ DEVICE_SW1_CONFIG_TABLE_ENTRIES = [
     )
 ]
 
+DEVICE_SW2_CONFIG_TABLE_ENTRIES = DEVICE_SW1_CONFIG_TABLE_ENTRIES
+
 
 """
 DEVICE_SW1_CONFIG_TABLE_ENTRIES = [
@@ -171,7 +205,6 @@ DEVICE_SW1_CONFIG_TABLE_ENTRIES = [
 
 ################################## TABLE DECONF ##################################
 
-
 DEVICE_SW1_DECONF_TABLE_ENTRIES = [
     json_config_rule_delete(
         'table',
@@ -213,6 +246,7 @@ DEVICE_SW1_DECONF_TABLE_ENTRIES = [
     )
 ]
 
+DEVICE_SW2_DECONF_TABLE_ENTRIES = DEVICE_SW1_DECONF_TABLE_ENTRIES
 
 
 """
@@ -271,6 +305,7 @@ TOPOLOGIES = [TOPOLOGY]
 
 DEVICES = [
     (DEVICE_SW1, DEVICE_SW1_CONNECT_RULES, DEVICE_SW1_CONFIG_TABLE_ENTRIES, DEVICE_SW1_DECONF_TABLE_ENTRIES),
+    (DEVICE_SW2, DEVICE_SW2_CONNECT_RULES, DEVICE_SW2_CONFIG_TABLE_ENTRIES, DEVICE_SW2_DECONF_TABLE_ENTRIES),
 ]
 
 LINKS = []
diff --git a/hackfest/p4/tests/test_functional_cleanup.py b/hackfest/p4/tests/test_functional_cleanup.py
index 32f716f1c2287b11bae3610022d64659d82ba73d..ccbcb9843a03bbf095743af0753da3fe8af3bfce 100644
--- a/hackfest/p4/tests/test_functional_cleanup.py
+++ b/hackfest/p4/tests/test_functional_cleanup.py
@@ -54,8 +54,8 @@ def test_scenario_cleanup(
         device_client.DeleteDevice(DeviceId(**device_id))
         #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid)))
 
-        response = context_client.ListDevices(Empty())
-        assert len(response.devices) == 0
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 0
 
     # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------
     for topology in TOPOLOGIES:
diff --git a/proto/kpi_sample_types.proto b/proto/kpi_sample_types.proto
index 7445a0f25a57df9793bd8761da024581988cf9e6..4419a8df4a22047d8708c5cf2e2c3657148b5eeb 100644
--- a/proto/kpi_sample_types.proto
+++ b/proto/kpi_sample_types.proto
@@ -16,9 +16,19 @@ syntax = "proto3";
 package kpi_sample_types;
 
 enum KpiSampleType {
-    KPISAMPLETYPE_UNKNOWN = 0;
-    KPISAMPLETYPE_PACKETS_TRANSMITTED = 101;
-    KPISAMPLETYPE_PACKETS_RECEIVED    = 102;
-    KPISAMPLETYPE_BYTES_TRANSMITTED   = 201;
-    KPISAMPLETYPE_BYTES_RECEIVED      = 202;
+    KPISAMPLETYPE_UNKNOWN                       = 0;
+    KPISAMPLETYPE_PACKETS_TRANSMITTED           = 101;
+    KPISAMPLETYPE_PACKETS_RECEIVED              = 102;
+    KPISAMPLETYPE_PACKETS_DROPPED               = 103;
+    KPISAMPLETYPE_BYTES_TRANSMITTED             = 201;
+    KPISAMPLETYPE_BYTES_RECEIVED                = 202;
+    KPISAMPLETYPE_BYTES_DROPPED                 = 203;
+    KPISAMPLETYPE_ML_CONFIDENCE                 = 401;  // can be used by both optical and L3 without any issue
+    KPISAMPLETYPE_OPTICAL_SECURITY_STATUS       = 501;  // can be used by both optical and L3 without any issue
+    KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS        = 601;
+    KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS       = 602;
+    KPISAMPLETYPE_L3_UNIQUE_ATTACKERS           = 603;
+    KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CLIENTS = 604;
+    KPISAMPLETYPE_L3_SECURITY_STATUS_CRYPTO     = 605;
+    KPISAMPLETYPE_SERVICE_LATENCY_MS            = 701;
 }
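
For illustration, the extended enum can be inspected through the generated Python bindings (a sketch; the common.proto module path is assumed from the imports used elsewhere in this patch):

    from common.proto.kpi_sample_types_pb2 import KpiSampleType

    # Numeric ranges group related KPIs: 1xx packets, 2xx bytes,
    # 4xx/5xx ML and optical security, 6xx L3 security, 7xx service-level.
    assert KpiSampleType.Name(103) == 'KPISAMPLETYPE_PACKETS_DROPPED'
    assert KpiSampleType.Value('KPISAMPLETYPE_SERVICE_LATENCY_MS') == 701
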
diff --git a/proto/monitoring.proto b/proto/monitoring.proto
index 9be39db909d915b2a9b5d99b01841db028959543..f9c408c96ced121f35cc1116bf64d013e7320e6a 100644
--- a/proto/monitoring.proto
+++ b/proto/monitoring.proto
@@ -25,7 +25,7 @@ service MonitoringService {
   rpc GetKpiDescriptorList  (context.Empty      ) returns (KpiDescriptorList   ) {} // Stable and final
   rpc IncludeKpi            (Kpi                ) returns (context.Empty       ) {} // Stable and final
   rpc MonitorKpi            (MonitorKpiRequest  ) returns (context.Empty       ) {} // Stable and final
-  rpc QueryKpiData          (KpiQuery           ) returns (KpiList             ) {} // Not implemented
+  rpc QueryKpiData          (KpiQuery           ) returns (RawKpiTable         ) {} // Not implemented
   rpc SetKpiSubscription    (SubsDescriptor     ) returns (stream SubsResponse ) {} // Stable not final
   rpc GetSubsDescriptor     (SubscriptionID     ) returns (SubsDescriptor      ) {} // Stable and final
   rpc GetSubscriptions      (context.Empty      ) returns (SubsList            ) {} // Stable and final
@@ -36,7 +36,7 @@ service MonitoringService {
   rpc GetAlarmResponseStream(AlarmSubscription  ) returns (stream AlarmResponse) {} // Not Stable not final
   rpc DeleteAlarm           (AlarmID            ) returns (context.Empty       ) {} // Stable and final
   rpc GetStreamKpi          (KpiId              ) returns (stream Kpi          ) {} // Stable not final
-  rpc GetInstantKpi         (KpiId              ) returns (Kpi             ) {} // Stable not final
+  rpc GetInstantKpi         (KpiId              ) returns (Kpi                 ) {} // Stable not final
 }
 
 message KpiDescriptor {
@@ -48,6 +48,7 @@ message KpiDescriptor {
   context.EndPointId             endpoint_id     = 6;
   context.ServiceId              service_id      = 7;
   context.SliceId                slice_id        = 8;
+  context.ConnectionId           connection_id   = 9;
 }
 
 message MonitorKpiRequest {
@@ -58,13 +59,26 @@ message MonitorKpiRequest {
 }
 
 message KpiQuery {
-  KpiId    kpi_id              = 1;
+  repeated KpiId    kpi_ids             = 1;
   float             monitoring_window_s = 2;
-  float             sampling_rate_s     = 3;
-  uint32            last_n_samples      = 4;  // used when you want something like "get the last N many samples
-  context.Timestamp start_timestamp     = 5;  // used when you want something like "get the samples since X date/time"
-  context.Timestamp end_timestamp       = 6;  // used when you want something like "get the samples until X date/time"
-  // Pending add field to reflect Available Device Protocols
+  uint32            last_n_samples      = 3;  // used when you want something like "get the last N samples"
+  context.Timestamp start_timestamp     = 4;  // used when you want something like "get the samples since X date/time"
+  context.Timestamp end_timestamp       = 5;  // used when you want something like "get the samples until X date/time"
+}
+
+
+message RawKpi { // cell
+  context.Timestamp timestamp = 1;
+  KpiValue          kpi_value = 2;
+}
+
+message RawKpiList { // column
+  KpiId           kpi_id    = 1;
+  repeated RawKpi raw_kpis  = 2;
+}
+
+message RawKpiTable { // table
+  repeated RawKpiList raw_kpi_lists = 1;
 }
 
 message KpiId {
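
A minimal sketch of how a caller could consume the reworked QueryKpiData once it is implemented; monitoring_client and the KpiId field layout are assumptions, not part of this patch:

    from common.proto.monitoring_pb2 import KpiQuery

    query = KpiQuery()
    query.kpi_ids.add().kpi_id.uuid = 'kpi-uuid-1'  # hypothetical KPI UUID
    query.monitoring_window_s = 60.0
    query.last_n_samples = 10
    table = monitoring_client.QueryKpiData(query)   # RawKpiTable
    for column in table.raw_kpi_lists:              # one RawKpiList per KPI
        for cell in column.raw_kpis:                # one RawKpi per sample
            print(column.kpi_id, cell.timestamp, cell.kpi_value)
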
diff --git a/proto/policy.proto b/proto/policy.proto
index d8e51caea2231e21b982771e7a4d63f3db93471c..9d0c34a3304f68c47a19ac56d0e96b10936bee7b 100644
--- a/proto/policy.proto
+++ b/proto/policy.proto
@@ -109,5 +109,5 @@ message PolicyRuleDeviceList {
 
 // A list of policy rules
 message PolicyRuleList {
-  repeated PolicyRuleId policyRules = 1;
+  repeated PolicyRule policyRules = 1;
 }
diff --git a/scripts/show_logs_monitoring.sh b/scripts/show_logs_monitoring.sh
index 520a9da1c652553eb90acd083caf5724275f4efe..faa825fdfae2bb85f0790a877b75d533ff5aa0d5 100755
--- a/scripts/show_logs_monitoring.sh
+++ b/scripts/show_logs_monitoring.sh
@@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 # Automated steps start here
 ########################################################################################################################
 
-kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringserver
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice server
diff --git a/src/common/database/api/context/slice/__init__.py b/src/common/database/api/context/slice/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/common/tests/LoadScenario.py b/src/common/tests/LoadScenario.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c3940e67b5772f3ba3ec0634c49f26b92bbc571
--- /dev/null
+++ b/src/common/tests/LoadScenario.py
@@ -0,0 +1,50 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from slice.client.SliceClient import SliceClient
+
+LOGGER = logging.getLogger(__name__)
+LOGGERS = {
+    'success': LOGGER.info,
+    'danger' : LOGGER.error,
+    'error'  : LOGGER.error,
+}
+
+def load_scenario_from_descriptor(
+    descriptor_file : str, context_client : ContextClient, device_client : DeviceClient,
+    service_client : ServiceClient, slice_client : SliceClient
+) -> DescriptorLoader:
+    with open(descriptor_file, 'r', encoding='UTF-8') as f:
+        descriptors = f.read()
+
+    descriptor_loader = DescriptorLoader(
+        descriptors,
+        context_client=context_client, device_client=device_client,
+        service_client=service_client, slice_client=slice_client)
+    results = descriptor_loader.process()
+
+    num_errors = 0
+    for message,level in compose_notifications(results):
+        LOGGERS.get(level)(message)
+        if level != 'success': num_errors += 1
+    if num_errors > 0:
+        MSG = 'Failed to load descriptors in file {:s}'
+        raise Exception(MSG.format(str(descriptor_file)))
+
+    return descriptor_loader
\ No newline at end of file
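
A sketch of how the helper might be invoked from a test, assuming the four clients are already instantiated; the descriptor path is hypothetical:

    descriptor_loader = load_scenario_from_descriptor(
        'src/tests/my-scenario/descriptors.json',  # hypothetical descriptor file
        context_client, device_client, service_client, slice_client)
    LOGGER.info('loaded %d device(s) and %d link(s)',
                descriptor_loader.num_devices, descriptor_loader.num_links)
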
diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py
index c47db248e61485e314703a43ce3cd535409cdea7..0a202ccd810ed50beca4bb9a7b4441305623f1ed 100644
--- a/src/common/tools/context_queries/InterDomain.py
+++ b/src/common/tools/context_queries/InterDomain.py
@@ -16,13 +16,13 @@ import logging
 from typing import Dict, List, Set, Tuple
 from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
 from common.DeviceTypes import DeviceTypeEnum
-from common.proto.context_pb2 import ContextId, Device, Empty, EndPointId, ServiceTypeEnum, Slice, TopologyId
+from common.proto.context_pb2 import ContextId, Device, Empty, EndPointId, ServiceTypeEnum, Slice
 from common.proto.pathcomp_pb2 import PathCompRequest
 from common.tools.context_queries.CheckType import device_type_is_network
-from common.tools.context_queries.Device import get_devices_in_topology, get_uuids_of_devices_in_topology
+from common.tools.context_queries.Device import get_devices_in_topology
+from common.tools.context_queries.Topology import get_topology
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient
 
@@ -60,8 +60,13 @@ def get_local_device_uuids(context_client : ContextClient) -> Set[str]:
     return local_device_uuids
 
 def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]:
-    interdomain_topology_id = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_UUID, context_id=ADMIN_CONTEXT_ID))
-    interdomain_topology = context_client.GetTopology(interdomain_topology_id)
+    context_uuid = DEFAULT_CONTEXT_UUID
+    topology_uuid = INTERDOMAIN_TOPOLOGY_UUID
+    interdomain_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid)
+    if interdomain_topology is None:
+        MSG = '[get_interdomain_device_uuids] {:s}/{:s} topology not found'
+        LOGGER.warning(MSG.format(context_uuid, topology_uuid))
+        return set()
 
     # add abstracted devices in the interdomain topology
     interdomain_device_ids = interdomain_topology.device_ids
diff --git a/src/common/tools/context_queries/Service.py b/src/common/tools/context_queries/Service.py
new file mode 100644
index 0000000000000000000000000000000000000000..15b201e731760068457683d9e30f79ab12d231d7
--- /dev/null
+++ b/src/common/tools/context_queries/Service.py
@@ -0,0 +1,39 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from typing import Optional
+from common.Constants import DEFAULT_CONTEXT_UUID
+from common.proto.context_pb2 import Service, ServiceId
+from context.client.ContextClient import ContextClient
+
+LOGGER = logging.getLogger(__name__)
+
+def get_service(
+        context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID,
+        rw_copy : bool = False
+    ) -> Optional[Service]:
+    try:
+        # pylint: disable=no-member
+        service_id = ServiceId()
+        service_id.context_id.context_uuid.uuid = context_uuid
+        service_id.service_uuid.uuid = service_uuid
+        ro_service = context_client.GetService(service_id)
+        if not rw_copy: return ro_service
+        rw_service = Service()
+        rw_service.CopyFrom(ro_service)
+        return rw_service
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get service({:s} / {:s})'.format(str(context_uuid), str(service_uuid)))
+        return None
diff --git a/src/common/tools/context_queries/Slice.py b/src/common/tools/context_queries/Slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f884aa94990c28ad786b3243aed948ddc7f9f34
--- /dev/null
+++ b/src/common/tools/context_queries/Slice.py
@@ -0,0 +1,39 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from typing import Optional
+from common.Constants import DEFAULT_CONTEXT_UUID
+from common.proto.context_pb2 import Slice, SliceId
+from context.client.ContextClient import ContextClient
+
+LOGGER = logging.getLogger(__name__)
+
+def get_slice(
+        context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID,
+        rw_copy : bool = False
+    ) -> Optional[Slice]:
+    try:
+        # pylint: disable=no-member
+        slice_id = SliceId()
+        slice_id.context_id.context_uuid.uuid = context_uuid
+        slice_id.slice_uuid.uuid = slice_uuid
+        ro_slice = context_client.GetSlice(slice_id)
+        if not rw_copy: return ro_slice
+        rw_slice = Slice()
+        rw_slice.CopyFrom(ro_slice)
+        return rw_slice
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get slice({:s} / {:s})'.format(str(context_uuid), str(slice_uuid)))
+        return None
diff --git a/src/common/tools/context_queries/Topology.py b/src/common/tools/context_queries/Topology.py
index fcf1b96bb51571a71ab35fb743f8154f02e2d200..3d2077e965efb3e78ad9febbe54b4f0aaea5aef6 100644
--- a/src/common/tools/context_queries/Topology.py
+++ b/src/common/tools/context_queries/Topology.py
@@ -12,12 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List
-from common.proto.context_pb2 import ContextId, Topology
+import grpc, logging
+from typing import List, Optional
+from common.Constants import DEFAULT_CONTEXT_UUID
+from common.proto.context_pb2 import ContextId, Topology, TopologyId
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology
 from context.client.ContextClient import ContextClient
 
+LOGGER = logging.getLogger(__name__)
+
 def create_topology(
     context_client : ContextClient, context_uuid : str, topology_uuid : str
 ) -> None:
@@ -39,3 +43,21 @@ def create_missing_topologies(
         if topology_uuid in existing_topology_uuids: continue
         grpc_topology = Topology(**json_topology(topology_uuid, context_id=context_id))
         context_client.SetTopology(grpc_topology)
+
+def get_topology(
+        context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID,
+        rw_copy : bool = False
+    ) -> Optional[Topology]:
+    try:
+        # pylint: disable=no-member
+        topology_id = TopologyId()
+        topology_id.context_id.context_uuid.uuid = context_uuid
+        topology_id.topology_uuid.uuid = topology_uuid
+        ro_topology = context_client.GetTopology(topology_id)
+        if not rw_copy: return ro_topology
+        rw_topology = Topology()
+        rw_topology.CopyFrom(ro_topology)
+        return rw_topology
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get topology({:s} / {:s})'.format(str(context_uuid), str(topology_uuid)))
+        return None
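
For illustration, the read-only vs. read-write behaviour of get_topology (and of the analogous get_service/get_slice helpers added above) works roughly as follows; a sketch assuming a connected ContextClient and a hypothetical device UUID:

    from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
    from common.tools.context_queries.Topology import get_topology

    topology = get_topology(context_client, DEFAULT_TOPOLOGY_UUID)  # read-only message, or None if absent
    rw_topology = get_topology(
        context_client, DEFAULT_TOPOLOGY_UUID, context_uuid=DEFAULT_CONTEXT_UUID, rw_copy=True)
    if rw_topology is not None:
        rw_topology.device_ids.add().device_uuid.uuid = 'SW1'  # mutates the local copy only
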
diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..f14e2caf6065996ea6223449f309e03d141b5954
--- /dev/null
+++ b/src/common/tools/descriptor/Loader.py
@@ -0,0 +1,253 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# SDN controller descriptor loader
+
+# Usage example (WebUI):
+#    descriptors = json.loads(descriptors_data_from_client)
+#    descriptor_loader = DescriptorLoader(descriptors)
+#    results = descriptor_loader.process()
+#    for message,level in compose_notifications(results):
+#        flash(message, level)
+
+# Usage example (pytest):
+#    with open('path/to/descriptor.json', 'r', encoding='UTF-8') as f:
+#        descriptors = json.loads(f.read())
+#    descriptor_loader = DescriptorLoader(
+#       descriptors, context_client=..., device_client=..., service_client=..., slice_client=...)
+#    results = descriptor_loader.process()
+#    loggers = {'success': LOGGER.info, 'danger': LOGGER.error, 'error': LOGGER.error}
+#    for message,level in compose_notifications(results):
+#        loggers.get(level)(message)
+
+import json
+from typing import Dict, List, Optional, Tuple, Union
+from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Slice, Topology
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from slice.client.SliceClient import SliceClient
+from .Tools import (
+    format_device_custom_config_rules, format_service_custom_config_rules, format_slice_custom_config_rules,
+    get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices,
+    get_descriptors_add_topologies, split_devices_by_rules)
+
+ENTITY_TO_TEXT = {
+    # name   => singular,    plural
+    'context'   : ('Context',    'Contexts'   ),
+    'topology'  : ('Topology',   'Topologies' ),
+    'device'    : ('Device',     'Devices'    ),
+    'link'      : ('Link',       'Links'      ),
+    'service'   : ('Service',    'Services'   ),
+    'slice'     : ('Slice',      'Slices'     ),
+    'connection': ('Connection', 'Connections'),
+}
+
+ACTION_TO_TEXT = {
+    # action =>  infinitive,  past
+    'add'     : ('Add',       'Added'),
+    'update'  : ('Update',    'Updated'),
+    'config'  : ('Configure', 'Configured'),
+}
+
+TypeResults = List[Tuple[str, str, int, List[str]]] # entity_name, action, num_ok, list[error]
+TypeNotification = Tuple[str, str] # message, level
+TypeNotificationList = List[TypeNotification]
+
+def compose_notifications(results : TypeResults) -> TypeNotificationList:
+    notifications = []
+    for entity_name, action_name, num_ok, error_list in results:
+        entity_name_singular, entity_name_plural = ENTITY_TO_TEXT[entity_name]
+        action_infinitive, action_past = ACTION_TO_TEXT[action_name]
+        num_err = len(error_list)
+        for error in error_list:
+            notifications.append((f'Unable to {action_infinitive} {entity_name_singular} {error}', 'error'))
+        if num_ok : notifications.append((f'{str(num_ok)} {entity_name_plural} {action_past}', 'success'))
+        if num_err: notifications.append((f'{str(num_err)} {entity_name_plural} failed', 'danger'))
+    return notifications
+
+class DescriptorLoader:
+    def __init__(
+        self, descriptors : Union[str, Dict],
+        context_client : Optional[ContextClient] = None, device_client : Optional[DeviceClient] = None,
+        service_client : Optional[ServiceClient] = None, slice_client : Optional[SliceClient] = None
+    ) -> None:
+        self.__descriptors = json.loads(descriptors) if isinstance(descriptors, str) else descriptors
+        self.__dummy_mode  = self.__descriptors.get('dummy_mode' , False)
+        self.__contexts    = self.__descriptors.get('contexts'   , [])
+        self.__topologies  = self.__descriptors.get('topologies' , [])
+        self.__devices     = self.__descriptors.get('devices'    , [])
+        self.__links       = self.__descriptors.get('links'      , [])
+        self.__services    = self.__descriptors.get('services'   , [])
+        self.__slices      = self.__descriptors.get('slices'     , [])
+        self.__connections = self.__descriptors.get('connections', [])
+
+        self.__contexts_add   = None
+        self.__topologies_add = None
+        self.__devices_add    = None
+        self.__devices_config = None
+        self.__services_add   = None
+        self.__slices_add     = None
+
+        self.__ctx_cli = ContextClient() if context_client is None else context_client
+        self.__dev_cli = DeviceClient()  if device_client  is None else device_client
+        self.__svc_cli = ServiceClient() if service_client is None else service_client
+        self.__slc_cli = SliceClient()   if slice_client   is None else slice_client
+
+        self.__results : TypeResults = list()
+
+    @property
+    def contexts(self) -> List[Dict]: return self.__contexts
+
+    @property
+    def num_contexts(self) -> int: return len(self.__contexts)
+
+    @property
+    def topologies(self) -> Dict[str, List[Dict]]:
+        _topologies = {}
+        for topology in self.__topologies:
+            context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid']
+            _topologies.setdefault(context_uuid, []).append(topology)
+        return _topologies
+
+    @property
+    def num_topologies(self) -> Dict[str, int]:
+        _num_topologies = {}
+        for topology in self.__topologies:
+            context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid']
+            _num_topologies[context_uuid] = _num_topologies.get(context_uuid, 0) + 1
+        return _num_topologies
+
+    @property
+    def devices(self) -> List[Dict]: return self.__devices
+
+    @property
+    def num_devices(self) -> int: return len(self.__devices)
+
+    @property
+    def links(self) -> List[Dict]: return self.__links
+
+    @property
+    def num_links(self) -> int: return len(self.__links)
+
+    @property
+    def services(self) -> Dict[str, List[Dict]]:
+        _services = {}
+        for service in self.__services:
+            context_uuid = service['service_id']['context_id']['context_uuid']['uuid']
+            _services.setdefault(context_uuid, []).append(service)
+        return _services
+
+    @property
+    def num_services(self) -> Dict[str, int]:
+        _num_services = {}
+        for service in self.__services:
+            context_uuid = service['service_id']['context_id']['context_uuid']['uuid']
+            _num_services[context_uuid] = _num_services.get(context_uuid, 0) + 1
+        return _num_services
+
+    @property
+    def slices(self) -> Dict[str, List[Dict]]:
+        _slices = {}
+        for slice_ in self.__slices:
+            context_uuid = slice_['slice_id']['context_id']['context_uuid']['uuid']
+            _slices.setdefault(context_uuid, []).append(slice_)
+        return _slices
+
+    @property
+    def num_slices(self) -> Dict[str, int]:
+        _num_slices = {}
+        for slice_ in self.__slices:
+            context_uuid = slice_['slice_id']['context_id']['context_uuid']['uuid']
+            _num_slices[context_uuid] = _num_slices.get(context_uuid, 0) + 1
+        return _num_slices
+
+    @property
+    def connections(self) -> List[Dict]: return self.__connections
+
+    @property
+    def num_connections(self) -> int: return len(self.__connections)
+
+    def process(self) -> TypeResults:
+        # Format CustomConfigRules in Devices, Services and Slices provided in JSON format
+        self.__devices  = [format_device_custom_config_rules (device ) for device  in self.__devices ]
+        self.__services = [format_service_custom_config_rules(service) for service in self.__services]
+        self.__slices   = [format_slice_custom_config_rules  (slice_ ) for slice_  in self.__slices  ]
+
+        # Context and Topology require the entity to be created first; devices, links, services,
+        # slices, etc. are added in a second stage.
+        self.__contexts_add = get_descriptors_add_contexts(self.__contexts)
+        self.__topologies_add = get_descriptors_add_topologies(self.__topologies)
+
+        if self.__dummy_mode:
+            self._dummy_mode()
+        else:
+            self._normal_mode()
+
+        return self.__results
+
+    def _dummy_mode(self) -> None:
+        # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
+        self.__ctx_cli.connect()
+        self._process_descr('context',    'add',    self.__ctx_cli.SetContext,    Context,    self.__contexts_add  )
+        self._process_descr('topology',   'add',    self.__ctx_cli.SetTopology,   Topology,   self.__topologies_add)
+        self._process_descr('device',     'add',    self.__ctx_cli.SetDevice,     Device,     self.__devices       )
+        self._process_descr('link',       'add',    self.__ctx_cli.SetLink,       Link,       self.__links         )
+        self._process_descr('service',    'add',    self.__ctx_cli.SetService,    Service,    self.__services      )
+        self._process_descr('slice',      'add',    self.__ctx_cli.SetSlice,      Slice,      self.__slices        )
+        self._process_descr('connection', 'add',    self.__ctx_cli.SetConnection, Connection, self.__connections   )
+        self._process_descr('context',    'update', self.__ctx_cli.SetContext,    Context,    self.__contexts      )
+        self._process_descr('topology',   'update', self.__ctx_cli.SetTopology,   Topology,   self.__topologies    )
+        #self.__ctx_cli.close()
+
+    def _normal_mode(self) -> None:
+        # Normal mode: follows the automated workflows in the different components
+        assert len(self.__connections) == 0, 'in normal mode, connections should not be set'
+
+        # Device, Service and Slice require the entity to be created first and then configured
+        self.__devices_add, self.__devices_config = split_devices_by_rules(self.__devices)
+        self.__services_add = get_descriptors_add_services(self.__services)
+        self.__slices_add = get_descriptors_add_slices(self.__slices)
+
+        self.__ctx_cli.connect()
+        self.__dev_cli.connect()
+        self.__svc_cli.connect()
+        self.__slc_cli.connect()
+
+        self._process_descr('context',  'add',    self.__ctx_cli.SetContext,      Context,  self.__contexts_add  )
+        self._process_descr('topology', 'add',    self.__ctx_cli.SetTopology,     Topology, self.__topologies_add)
+        self._process_descr('device',   'add',    self.__dev_cli.AddDevice,       Device,   self.__devices_add   )
+        self._process_descr('device',   'config', self.__dev_cli.ConfigureDevice, Device,   self.__devices_config)
+        self._process_descr('link',     'add',    self.__ctx_cli.SetLink,         Link,     self.__links         )
+        self._process_descr('service',  'add',    self.__svc_cli.CreateService,   Service,  self.__services_add  )
+        self._process_descr('service',  'update', self.__svc_cli.UpdateService,   Service,  self.__services      )
+        self._process_descr('slice',    'add',    self.__slc_cli.CreateSlice,     Slice,    self.__slices_add    )
+        self._process_descr('slice',    'update', self.__slc_cli.UpdateSlice,     Slice,    self.__slices        )
+        self._process_descr('context',  'update', self.__ctx_cli.SetContext,      Context,  self.__contexts      )
+        self._process_descr('topology', 'update', self.__ctx_cli.SetTopology,     Topology, self.__topologies    )
+
+        #self.__slc_cli.close()
+        #self.__svc_cli.close()
+        #self.__dev_cli.close()
+        #self.__ctx_cli.close()
+
+    def _process_descr(self, entity_name, action_name, grpc_method, grpc_class, entities) -> None:
+        num_ok, error_list = 0, []
+        for entity in entities:
+            try:
+                grpc_method(grpc_class(**entity))
+                num_ok += 1
+            except Exception as e: # pylint: disable=broad-except
+                error_list.append(f'{str(entity)}: {str(e)}')
+        self.__results.append((entity_name, action_name, num_ok, error_list))
diff --git a/src/webui/service/main/DescriptorTools.py b/src/common/tools/descriptor/Tools.py
similarity index 79%
rename from src/webui/service/main/DescriptorTools.py
rename to src/common/tools/descriptor/Tools.py
index 094be2f7d0cfd69ddb5cddc2238e8cec64c75daa..909cec9d97b5baa2f7b0198091c3921a71c9b1f7 100644
--- a/src/webui/service/main/DescriptorTools.py
+++ b/src/common/tools/descriptor/Tools.py
@@ -41,8 +41,8 @@ def get_descriptors_add_services(services : List[Dict]) -> List[Dict]:
 
 def get_descriptors_add_slices(slices : List[Dict]) -> List[Dict]:
     slices_add = []
-    for slice in slices:
-        slice_copy = copy.deepcopy(slice)
+    for slice_ in slices:
+        slice_copy = copy.deepcopy(slice_)
         slice_copy['slice_endpoint_ids'] = []
         slice_copy['slice_constraints'] = []
         slice_copy['slice_config'] = {'config_rules': []}
@@ -59,6 +59,24 @@ def format_custom_config_rules(config_rules : List[Dict]) -> List[Dict]:
             config_rule['custom']['resource_value'] = custom_resource_value
     return config_rules
 
+def format_device_custom_config_rules(device : Dict) -> Dict:
+    config_rules = device.get('device_config', {}).get('config_rules', [])
+    config_rules = format_custom_config_rules(config_rules)
+    device['device_config']['config_rules'] = config_rules
+    return device
+
+def format_service_custom_config_rules(service : Dict) -> Dict:
+    config_rules = service.get('service_config', {}).get('config_rules', [])
+    config_rules = format_custom_config_rules(config_rules)
+    service['service_config']['config_rules'] = config_rules
+    return service
+
+def format_slice_custom_config_rules(slice_ : Dict) -> Dict:
+    config_rules = slice_.get('slice_config', {}).get('config_rules', [])
+    config_rules = format_custom_config_rules(config_rules)
+    slice_['slice_config']['config_rules'] = config_rules
+    return slice_
+
 def split_devices_by_rules(devices : List[Dict]) -> Tuple[List[Dict], List[Dict]]:
     devices_add = []
     devices_config = []
diff --git a/src/compute/tests/mock_osm/__init__.py b/src/common/tools/descriptor/__init__.py
similarity index 100%
rename from src/compute/tests/mock_osm/__init__.py
rename to src/common/tools/descriptor/__init__.py
diff --git a/src/tests/ofc22/tests/BuildDescriptors.py b/src/common/tools/descriptor/old/BuildDescriptors.py
similarity index 100%
rename from src/tests/ofc22/tests/BuildDescriptors.py
rename to src/common/tools/descriptor/old/BuildDescriptors.py
diff --git a/src/tests/ofc22/tests/LoadDescriptors.py b/src/common/tools/descriptor/old/LoadDescriptors.py
similarity index 100%
rename from src/tests/ofc22/tests/LoadDescriptors.py
rename to src/common/tools/descriptor/old/LoadDescriptors.py
index 33bc699af933601e4c6d4b8dbc7b0c51206241ef..f0b19196afbcd67c1f20263791d20820489b9cf5 100644
--- a/src/tests/ofc22/tests/LoadDescriptors.py
+++ b/src/common/tools/descriptor/old/LoadDescriptors.py
@@ -14,8 +14,8 @@
 
 import json, logging, sys
 from common.Settings import get_setting
-from context.client.ContextClient import ContextClient
 from common.proto.context_pb2 import Context, Device, Link, Topology
+from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 
 LOGGER = logging.getLogger(__name__)
diff --git a/src/common/tools/object_factory/PolicyRule.py b/src/common/tools/object_factory/PolicyRule.py
new file mode 100644
index 0000000000000000000000000000000000000000..8702f931dfffef175ce6c25de24a10de8286effc
--- /dev/null
+++ b/src/common/tools/object_factory/PolicyRule.py
@@ -0,0 +1,48 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Dict, List, Optional
+from common.proto.policy_condition_pb2 import BooleanOperator
+
+LOGGER = logging.getLogger(__name__)
+
+def json_policy_rule_id(policy_rule_uuid : str) -> Dict:
+    return {'uuid': {'uuid': policy_rule_uuid}}
+
+def json_policy_rule(
+    policy_rule_uuid : str, policy_priority : int = 1,
+    boolean_operator : BooleanOperator = BooleanOperator.POLICYRULE_CONDITION_BOOLEAN_AND,
+    condition_list : List[Dict] = [], action_list : List[Dict] = [],
+    service_id : Optional[Dict] = None, device_id_list : List[Dict] = []
+) -> Dict:
+    basic = {
+        'policyRuleId': json_policy_rule_id(policy_rule_uuid),
+        'priority': policy_priority,
+        'conditionList': condition_list,
+        'booleanOperator': boolean_operator,
+        'actionList': action_list,
+    }
+
+    result = {}
+    if service_id is not None:
+        policy_rule_type = 'service'
+        result[policy_rule_type] = {'policyRuleBasic': basic}
+        result[policy_rule_type]['serviceId'] = service_id
+    else:
+        policy_rule_type = 'device'
+        result[policy_rule_type] = {'policyRuleBasic': basic}
+
+    result[policy_rule_type]['deviceList'] = device_id_list
+    return result
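
A short sketch of the factory in use (UUIDs hypothetical); omitting service_id yields a device-scoped rule, as the branch above shows:

    from common.tools.object_factory.PolicyRule import json_policy_rule

    device_ids = [{'device_uuid': {'uuid': 'SW1'}}]  # hypothetical DeviceId dict
    rule = json_policy_rule('rule-1', policy_priority=10, device_id_list=device_ids)
    assert 'device' in rule
    assert rule['device']['deviceList'] == device_ids
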
diff --git a/src/compute/Dockerfile b/src/compute/Dockerfile
index bdc07584c5bd8f08fdef6f997cc18dcfd9eeb3e6..90a69c0f503724fd1098608d85ad5eca874e3f8b 100644
--- a/src/compute/Dockerfile
+++ b/src/compute/Dockerfile
@@ -66,6 +66,8 @@ COPY src/compute/. compute/
 COPY src/context/. context/
 COPY src/service/. service/
 COPY src/slice/. slice/
+RUN mkdir -p /var/teraflow/tests/tools
+COPY src/tests/tools/mock_osm/. tests/tools/mock_osm/
 
 # Start the service
 ENTRYPOINT ["python", "-m", "compute.service"]
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
index 7e050289f19b93dc710185c2b29b326bbfd156d2..e3d12088147a59c3fd9e0179d3a3d957483fcc22 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
@@ -17,10 +17,10 @@ from flask import request
 from flask.json import jsonify
 from flask_restful import Resource
 from common.proto.context_pb2 import SliceStatusEnum
+from common.tools.context_queries.Slice import get_slice
 from context.client.ContextClient import ContextClient
 from slice.client.SliceClient import SliceClient
 from .tools.Authentication import HTTP_AUTH
-from .tools.ContextMethods import get_slice
 from .tools.HttpStatusCodes import HTTP_GATEWAYTIMEOUT, HTTP_NOCONTENT, HTTP_OK, HTTP_SERVERERROR
 
 LOGGER = logging.getLogger(__name__)
@@ -34,7 +34,7 @@ class L2VPN_Service(Resource):
         try:
             context_client = ContextClient()
 
-            target = get_slice(context_client, vpn_id)
+            target = get_slice(context_client, vpn_id, rw_copy=True)
             if target is None:
                 raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
 
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index 8aa410e9a2136f8f0c6df94a9d17ea376fcfc516..819d8995da6ffc3a7913c8781e4021ce83665e29 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -20,6 +20,7 @@ from flask.wrappers import Response
 from flask_restful import Resource
 from werkzeug.exceptions import UnsupportedMediaType
 from common.proto.context_pb2 import Slice
+from common.tools.context_queries.Slice import get_slice
 from common.tools.grpc.ConfigRules import update_config_rule_custom
 from common.tools.grpc.Constraints import (
     update_constraint_custom_dict, update_constraint_endpoint_location, update_constraint_endpoint_priority,
@@ -30,7 +31,6 @@ from context.client.ContextClient import ContextClient
 from slice.client.SliceClient import SliceClient
 from .schemas.site_network_access import SCHEMA_SITE_NETWORK_ACCESS
 from .tools.Authentication import HTTP_AUTH
-from .tools.ContextMethods import get_slice
 from .tools.HttpStatusCodes import HTTP_NOCONTENT, HTTP_SERVERERROR
 from .tools.Validator import validate_message
 from .Constants import (
@@ -69,7 +69,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s
         address_ip, address_prefix, remote_router, circuit_id
     ) = mapping
 
-    target = get_slice(context_client, vpn_id)
+    target = get_slice(context_client, vpn_id, rw_copy=True)
     if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
 
     endpoint_ids = target.slice_endpoint_ids        # pylint: disable=no-member
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py
deleted file mode 100644
index ac9e6fe4a5c138d00bc80fd953de2cc21d4677b5..0000000000000000000000000000000000000000
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import grpc, logging
-from typing import Optional
-from common.Constants import DEFAULT_CONTEXT_UUID
-from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId
-from context.client.ContextClient import ContextClient
-
-LOGGER = logging.getLogger(__name__)
-
-def get_service(
-        context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID
-    ) -> Optional[Service]:
-    try:
-        # pylint: disable=no-member
-        service_id = ServiceId()
-        service_id.context_id.context_uuid.uuid = context_uuid
-        service_id.service_uuid.uuid = service_uuid
-        service_readonly = context_client.GetService(service_id)
-        service = Service()
-        service.CopyFrom(service_readonly)
-        return service
-    except grpc.RpcError:
-        #LOGGER.exception('Unable to get service({:s} / {:s})'.format(str(context_uuid), str(service_uuid)))
-        return None
-
-def get_slice(
-        context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID
-    ) -> Optional[Slice]:
-    try:
-        # pylint: disable=no-member
-        slice_id = SliceId()
-        slice_id.context_id.context_uuid.uuid = context_uuid
-        slice_id.slice_uuid.uuid = slice_uuid
-        slice_readonly = context_client.GetSlice(slice_id)
-        slice_ = Slice()
-        slice_.CopyFrom(slice_readonly)
-        return slice_
-    except grpc.RpcError:
-        #LOGGER.exception('Unable to get slice({:s} / {:s})'.format(str(context_uuid), str(slice_uuid)))
-        return None
diff --git a/src/compute/tests/PrepareTestScenario.py b/src/compute/tests/PrepareTestScenario.py
index d534a4a28280c80964096a9cb7291c498ebe6b93..06fb34f9ee7508f4bd6fa769da78c50eb78c3bb8 100644
--- a/src/compute/tests/PrepareTestScenario.py
+++ b/src/compute/tests/PrepareTestScenario.py
@@ -19,7 +19,7 @@ from common.Settings import (
 from compute.service.rest_server.RestServer import RestServer
 from compute.service.rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn
 from compute.tests.MockService_Dependencies import MockService_Dependencies
-from .mock_osm.MockOSM import MockOSM
+from tests.tools.mock_osm.MockOSM import MockOSM
 from .Constants import WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD
 
 LOCAL_HOST = '127.0.0.1'
diff --git a/src/compute/tests/test_unitary.py b/src/compute/tests/test_unitary.py
index 05c45c1b3554d21084a4a20cac6856b049fe7ca3..acef6d4a68cb1e89df2fa567d437412c8805b35f 100644
--- a/src/compute/tests/test_unitary.py
+++ b/src/compute/tests/test_unitary.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging
-from .mock_osm.MockOSM import MockOSM
+from tests.tools.mock_osm.MockOSM import MockOSM
 from .Constants import SERVICE_CONNECTION_POINTS_1, SERVICE_CONNECTION_POINTS_2, SERVICE_TYPE
 from .PrepareTestScenario import ( # pylint: disable=unused-import
     # be careful, order of symbols is important here!
diff --git a/src/context/client/ContextClient.py b/src/context/client/ContextClient.py
index da907341f799def94694817242c106a913e03327..f91f36cf5bf73669e4010c8c65d9c4cabd9c6e2e 100644
--- a/src/context/client/ContextClient.py
+++ b/src/context/client/ContextClient.py
@@ -28,6 +28,8 @@ from common.proto.context_pb2 import (
     Slice, SliceEvent, SliceId, SliceIdList, SliceList,
     Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
 from common.proto.context_pb2_grpc import ContextServiceStub
+from common.proto.context_policy_pb2_grpc import ContextPolicyServiceStub
+from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule
 
 LOGGER = logging.getLogger(__name__)
 MAX_RETRIES = 15
@@ -42,17 +44,20 @@ class ContextClient:
         LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
         self.channel = None
         self.stub = None
+        self.policy_stub = None
         self.connect()
         LOGGER.debug('Channel created')
 
     def connect(self):
         self.channel = grpc.insecure_channel(self.endpoint)
         self.stub = ContextServiceStub(self.channel)
+        self.policy_stub = ContextPolicyServiceStub(self.channel)
 
     def close(self):
         if self.channel is not None: self.channel.close()
         self.channel = None
         self.stub = None
+        self.policy_stub = None
 
     @RETRY_DECORATOR
     def ListContextIds(self, request: Empty) -> ContextIdList:
@@ -361,3 +366,38 @@ class ContextClient:
         response = self.stub.GetConnectionEvents(request)
         LOGGER.debug('GetConnectionEvents result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
+
+    @RETRY_DECORATOR
+    def ListPolicyRuleIds(self, request: Empty) -> PolicyRuleIdList:
+        LOGGER.debug('ListPolicyRuleIds request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.policy_stub.ListPolicyRuleIds(request)
+        LOGGER.debug('ListPolicyRuleIds result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def ListPolicyRules(self, request: Empty) -> PolicyRuleList:
+        LOGGER.debug('ListPolicyRules request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.policy_stub.ListPolicyRules(request)
+        LOGGER.debug('ListPolicyRules result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def GetPolicyRule(self, request: PolicyRuleId) -> PolicyRule:
+        LOGGER.debug('GetPolicyRule request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.policy_stub.GetPolicyRule(request)
+        LOGGER.debug('GetPolicyRule result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def SetPolicyRule(self, request: PolicyRule) -> PolicyRuleId:
+        LOGGER.debug('SetPolicyRule request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.policy_stub.SetPolicyRule(request)
+        LOGGER.debug('SetPolicyRule result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RemovePolicyRule(self, request: PolicyRuleId) -> Empty:
+        LOGGER.debug('RemovePolicyRule request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.policy_stub.RemovePolicyRule(request)
+        LOGGER.debug('RemovePolicyRule result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
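
Together with the object factory added above, the new policy RPCs can be exercised roughly as follows (a sketch, not part of the patch; the rule UUID is hypothetical):

    from common.proto.context_pb2 import Empty
    from common.proto.policy_pb2 import PolicyRule
    from common.tools.object_factory.PolicyRule import json_policy_rule
    from context.client.ContextClient import ContextClient

    context_client = ContextClient()  # connects on construction
    policy_rule_id = context_client.SetPolicyRule(PolicyRule(**json_policy_rule('rule-1')))
    policy_rule = context_client.GetPolicyRule(policy_rule_id)
    assert len(context_client.ListPolicyRules(Empty()).policyRules) >= 1
    context_client.RemovePolicyRule(policy_rule_id)
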
diff --git a/src/context/service/database/PolicyRuleModel.py b/src/context/service/database/PolicyRuleModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c84ea940482091a5667b2f11272748c7b444b6f
--- /dev/null
+++ b/src/context/service/database/PolicyRuleModel.py
@@ -0,0 +1,32 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import json
+from typing import Dict
+from common.orm.fields.PrimaryKeyField import PrimaryKeyField
+from common.orm.fields.StringField import StringField
+from common.orm.model.Model import Model
+
+LOGGER = logging.getLogger(__name__)
+
+class PolicyRuleModel(Model):
+    pk = PrimaryKeyField()
+    value = StringField(required=True, allow_empty=False)
+
+    def dump_id(self) -> Dict:
+        return {'uuid': {'uuid': self.pk}}
+
+    def dump(self) -> Dict:
+        return json.loads(self.value)
diff --git a/src/context/service/grpc_server/ContextService.py b/src/context/service/grpc_server/ContextService.py
index 1b54ec5400c93cba3882dccb197479b75bb699af..5d4dd8bb991ed64a970f9815bb302fd33d51cf34 100644
--- a/src/context/service/grpc_server/ContextService.py
+++ b/src/context/service/grpc_server/ContextService.py
@@ -17,6 +17,7 @@ from common.Settings import get_service_port_grpc
 from common.message_broker.MessageBroker import MessageBroker
 from common.orm.Database import Database
 from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+from common.proto.context_policy_pb2_grpc import add_ContextPolicyServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from .ContextServiceServicerImpl import ContextServiceServicerImpl
 
@@ -31,3 +32,4 @@ class ContextService(GenericGrpcService):
 
     def install_servicers(self):
         add_ContextServiceServicer_to_server(self.context_servicer, self.server)
+        add_ContextPolicyServiceServicer_to_server(self.context_servicer, self.server)
diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py
index ec12795bd8948ad93bfd759d222ef18b960bc0e3..f8dd188198606805e42449c3d690c20d3ad45f03 100644
--- a/src/context/service/grpc_server/ContextServiceServicerImpl.py
+++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py
@@ -28,13 +28,17 @@ from common.proto.context_pb2 import (
     Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
     Slice, SliceEvent, SliceId, SliceIdList, SliceList,
     Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
+from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule)
 from common.proto.context_pb2_grpc import ContextServiceServicer
+from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer
 from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
 from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException
+from common.tools.grpc.Tools import grpc_message_to_json
 from context.service.database.ConfigModel import update_config
 from context.service.database.ConnectionModel import ConnectionModel, set_path
 from context.service.database.ConstraintModel import set_constraints
 from context.service.database.ContextModel import ContextModel
+from context.service.database.PolicyRuleModel import PolicyRuleModel
 from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers
 from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types
 from context.service.database.Events import notify_event
@@ -61,11 +65,12 @@ METHOD_NAMES = [
     'ListLinkIds',       'ListLinks',       'GetLink',       'SetLink',       'RemoveLink',       'GetLinkEvents',
     'ListServiceIds',    'ListServices',    'GetService',    'SetService',    'RemoveService',    'GetServiceEvents',
     'ListSliceIds',      'ListSlices',      'GetSlice',      'SetSlice',      'RemoveSlice',      'GetSliceEvents',
+    'ListPolicyRuleIds', 'ListPolicyRules', 'GetPolicyRule', 'SetPolicyRule', 'RemovePolicyRule',
     'UnsetService',      'UnsetSlice',
 ]
 METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
 
-class ContextServiceServicerImpl(ContextServiceServicer):
+class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer):
     def __init__(self, database : Database, messagebroker : MessageBroker):
         LOGGER.debug('Creating Servicer...')
         self.lock = threading.Lock()
@@ -813,3 +818,56 @@ class ContextServiceServicerImpl(ContextServiceServicer):
     def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]:
         for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT):
             yield ConnectionEvent(**json.loads(message.content))
+
+
+    # ----- Policy -----------------------------------------------------------------------------------------------------
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def ListPolicyRuleIds(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleIdList:
+        with self.lock:
+            db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel)
+            db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk'))
+            return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules])
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def ListPolicyRules(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleList:
+        with self.lock:
+            db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel)
+            db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk'))
+            return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules])
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def GetPolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule:
+        with self.lock:
+            policy_rule_uuid = request.uuid.uuid
+            db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid)
+            return PolicyRule(**db_policy_rule.dump())
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def SetPolicyRule(self, request: PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId:
+        with self.lock:
+            policy_rule_type = request.WhichOneof('policy_rule')
+            policy_rule_json = grpc_message_to_json(request)
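+            # The rule UUID is nested under the active oneof branch of the JSON form.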
+            policy_rule_uuid = policy_rule_json[policy_rule_type]['policyRuleBasic']['policyRuleId']['uuid']['uuid']
+            result: Tuple[PolicyRuleModel, bool] = update_or_create_object(
+                self.database, PolicyRuleModel, policy_rule_uuid, {'value': json.dumps(policy_rule_json)})
+            db_policy, updated = result # pylint: disable=unused-variable
+
+            #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+            dict_policy_id = db_policy.dump_id()
+            #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id})
+            return PolicyRuleId(**dict_policy_id)
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RemovePolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> Empty:
+        with self.lock:
+            policy_uuid = request.uuid.uuid
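+            # Instantiate without auto-loading so a missing rule can be detected and ignored.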
+            db_policy = PolicyRuleModel(self.database, policy_uuid, auto_load=False)
+            found = db_policy.load()
+            if not found: return Empty()
+
+            dict_policy_id = db_policy.dump_id()  # pylint: disable=unused-variable
+            db_policy.delete()
+            #event_type = EventTypeEnum.EVENTTYPE_REMOVE
+            #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id})
+            return Empty()
diff --git a/src/context/service/rest_server/Resources.py b/src/context/service/rest_server/Resources.py
index d1738edb20361dab70334bc026d94d37c654127a..5f03132a34004388596ce1fdfac470f029c093ea 100644
--- a/src/context/service/rest_server/Resources.py
+++ b/src/context/service/rest_server/Resources.py
@@ -17,6 +17,7 @@ from flask.json import jsonify
 from flask_restful import Resource
 from common.orm.Database import Database
 from common.proto.context_pb2 import ConnectionId, ContextId, DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId
+from common.proto.policy_pb2 import PolicyRuleId
 from common.tools.grpc.Tools import grpc_message_to_json
 from context.service.grpc_server.ContextServiceServicerImpl import ContextServiceServicerImpl
 
@@ -61,6 +62,12 @@ def grpc_topology_id(context_uuid, topology_uuid):
         'topology_uuid': {'uuid': topology_uuid}
     })
 
+def grpc_policy_rule_id(policy_rule_uuid):
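+    # Build a PolicyRuleId message from a plain UUID string.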
+    return PolicyRuleId(**{
+        'uuid': {'uuid': policy_rule_uuid}
+    })
+
 class _Resource(Resource):
     def __init__(self, database : Database) -> None:
         super().__init__()
@@ -151,6 +157,18 @@ class Connection(_Resource):
     def get(self, connection_uuid : str):
         return format_grpc_to_json(self.servicer.GetConnection(grpc_connection_id(connection_uuid), None))
 
+class PolicyRuleIds(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.servicer.ListPolicyRuleIds(Empty(), None))
+
+class PolicyRules(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.servicer.ListPolicyRules(Empty(), None))
+
+class PolicyRule(_Resource):
+    def get(self, policy_rule_uuid : str):
+        return format_grpc_to_json(self.servicer.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid), None))
+
 class DumpText(Resource):
     def __init__(self, database : Database) -> None:
         super().__init__()
@@ -219,6 +237,10 @@ RESOURCES = [
     ('api.connections',    Connections,   '/context/<string:context_uuid>/service/<path:service_uuid>/connections'),
     ('api.connection',     Connection,    '/connection/<path:connection_uuid>'),
 
+    ('api.policyrule_ids', PolicyRuleIds, '/policyrule_ids'),
+    ('api.policyrules',    PolicyRules,   '/policyrules'),
+    ('api.policyrule',     PolicyRule,    '/policyrule/<string:policy_rule_uuid>'),
+
     ('api.dump.text',      DumpText,      '/dump/text'),
     ('api.dump.html',      DumpHtml,      '/dump/html'),
 ]
diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py
index 140cbff686eaf5b430f23ee987a9335ecb04c0f5..1cf929cfa578e8bbf8f95885cc2a7bc7e7b9f3ef 100644
--- a/src/context/tests/Objects.py
+++ b/src/context/tests/Objects.py
@@ -23,6 +23,7 @@ from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
 from common.tools.object_factory.Link import json_link, json_link_id
 from common.tools.object_factory.Service import json_service_id, json_service_l3nm_planned
 from common.tools.object_factory.Topology import json_topology, json_topology_id
+from common.tools.object_factory.PolicyRule import json_policy_rule, json_policy_rule_id
 
 
 # ----- Context --------------------------------------------------------------------------------------------------------
@@ -197,3 +198,9 @@ CONNECTION_R1_R3_SVCIDS = [SERVICE_R1_R2_ID, SERVICE_R2_R3_ID]
 CONNECTION_R1_R3        = json_connection(
     CONNECTION_R1_R3_UUID, service_id=SERVICE_R1_R3_ID, path_hops_endpoint_ids=CONNECTION_R1_R3_EPIDS,
     sub_service_ids=CONNECTION_R1_R3_SVCIDS)
+
+
+# ----- PolicyRule -------------------------------------------------------------------------------------------------------
+POLICY_RULE_UUID = '56380225-3e40-4f74-9162-529f8dcb96a1'
+POLICY_RULE_ID   = json_policy_rule_id(POLICY_RULE_UUID)
+POLICY_RULE      = json_policy_rule(POLICY_RULE_UUID)
diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py
index 3109ef13dea98d4a56d661871b1c38ee2296f890..022c0472039d526e488f8a69096fae8c0edbdb48 100644
--- a/src/context/tests/test_unitary.py
+++ b/src/context/tests/test_unitary.py
@@ -27,6 +27,7 @@ from common.proto.context_pb2 import (
     Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId,
     DeviceOperationalStatusEnum, Empty, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId,
     ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId)
+from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule)
 from common.type_checkers.Assertions import (
     validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids,
     validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids,
@@ -44,7 +45,8 @@ from .Objects import (
     CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID,
     DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2,
     LINK_R1_R2_ID, LINK_R1_R2_UUID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3,
-    SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID)
+    SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID,
+    POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID)
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
@@ -68,8 +70,8 @@ REDIS_CONFIG = {
 }
 
 SCENARIOS = [
-    ('all_inmemory', DatabaseBackendEnum.INMEMORY, {},           MessageBrokerBackendEnum.INMEMORY, {}          ),
-    ('all_redis',    DatabaseBackendEnum.REDIS,    REDIS_CONFIG, MessageBrokerBackendEnum.REDIS,    REDIS_CONFIG),
+    ('all_inmemory', DatabaseBackendEnum.INMEMORY, {},           MessageBrokerBackendEnum.INMEMORY, {}          )
+#    ('all_redis',    DatabaseBackendEnum.REDIS,    REDIS_CONFIG, MessageBrokerBackendEnum.REDIS,    REDIS_CONFIG),
 ]
 
 @pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS)
@@ -1169,6 +1171,101 @@ def test_grpc_connection(
     assert len(db_entries) == 0
 
 
+def test_grpc_policy(
+    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
+    context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
+    context_database = context_db_mb[0]
+
+    # ----- Clean the database -----------------------------------------------------------------------------------------
+    context_database.clear_all()
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    #events_collector = EventsCollector(context_client_grpc)
+    #events_collector.start()
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
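+    # PolicyRuleId wraps a nested Uuid message, hence the double 'uuid' key below.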
+    POLICY_ID = 'no-uuid'
+    DEFAULT_POLICY_ID = {'uuid': {'uuid': POLICY_ID}}
+
+    with pytest.raises(grpc.RpcError) as e:
+        context_client_grpc.GetPolicyRule(PolicyRuleId(**DEFAULT_POLICY_ID))
+
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    assert e.value.details() == 'PolicyRule({:s}) not found'.format(POLICY_ID)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client_grpc.ListPolicyRuleIds(Empty())
+    assert len(response.policyRuleIdList) == 0
+
+    response = context_client_grpc.ListPolicyRules(Empty())
+    assert len(response.policyRules) == 0
+
+    # ----- Dump state of database before create the object ------------------------------------------------------------
+    db_entries = context_database.dump()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry))  # pragma: no cover
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE))
+    assert response.uuid.uuid == POLICY_RULE_UUID
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    # events = events_collector.get_events(block=True, count=1)
+    # assert isinstance(events[0], PolicyEvent)
+    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE))
+    assert response.uuid.uuid == POLICY_RULE_UUID
+
+    # ----- Dump state of database after create/update the object ------------------------------------------------------
+    db_entries = context_database.dump()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 2
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client_grpc.GetPolicyRule(PolicyRuleId(**POLICY_RULE_ID))
+    assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == POLICY_RULE_UUID
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client_grpc.ListPolicyRuleIds(Empty())
+    assert len(response.policyRuleIdList) == 1
+    assert response.policyRuleIdList[0].uuid.uuid == POLICY_RULE_UUID
+
+    response = context_client_grpc.ListPolicyRules(Empty())
+    assert len(response.policyRules) == 1
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client_grpc.RemovePolicyRule(PolicyRuleId(**POLICY_RULE_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    # events = events_collector.get_events(block=True, count=2)
+
+    # assert isinstance(events[0], PolicyEvent)
+    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID
+
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    # events_collector.stop()
+
+    # ----- Dump state of database after remove the object -------------------------------------------------------------
+    db_entries = context_database.dump()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+
 # ----- Test REST API methods ------------------------------------------------------------------------------------------
 
 def test_rest_populate_database(
@@ -1224,6 +1321,22 @@ def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=
     reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid))
     validate_service(reply)
 
+def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid))
+    #validate_slice_ids(reply)
+
+def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    reply = do_rest_request('/context/{:s}/slices'.format(context_uuid))
+    #validate_slices(reply)
+
+#def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+#    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+#    slice_uuid = urllib.parse.quote(SLICE_R1_R2_UUID, safe='')
+#    reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid))
+#    #validate_slice(reply)
+
 def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
     reply = do_rest_request('/device_ids')
     validate_device_ids(reply)
@@ -1267,6 +1380,19 @@ def test_rest_get_connection(context_service_rest : RestServer): # pylint: disab
     reply = do_rest_request('/connection/{:s}'.format(connection_uuid))
     validate_connection(reply)
 
+def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/policyrule_ids')
+    #validate_policyrule_ids(reply)
+
+def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/policyrules')
+    #validate_policyrules(reply)
+
+#def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+#    policyrule_uuid = urllib.parse.quote(POLICYRULE_UUID, safe='')
+#    reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid))
+#    #validate_policyrule(reply)
+
 
 # ----- Test misc. Context internal tools ------------------------------------------------------------------------------
 
diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py
index 069c07ce40e43192b74519b2175e7e10c638cd20..b8ff795fbd9466874b07f1f752fce682ea741111 100644
--- a/src/device/service/drivers/p4/p4_driver.py
+++ b/src/device/service/drivers/p4/p4_driver.py
@@ -28,7 +28,7 @@ from .p4_common import matches_ipv4, matches_ipv6, valid_port,\
     P4_ATTR_DEV_P4BIN, P4_ATTR_DEV_P4INFO, P4_ATTR_DEV_TIMEOUT,\
     P4_VAL_DEF_VENDOR, P4_VAL_DEF_HW_VER, P4_VAL_DEF_SW_VER,\
     P4_VAL_DEF_TIMEOUT
-from .p4_manager import P4Manager, get_api_version, KEY_TABLE,\
+from .p4_manager import P4Manager, KEY_TABLE,\
     KEY_ACTION_PROFILE, KEY_COUNTER, KEY_DIR_COUNTER, KEY_METER, KEY_DIR_METER,\
     KEY_CTL_PKT_METADATA
 from .p4_client import WriteOperation
@@ -127,8 +127,7 @@ class P4Driver(_Driver):
             except Exception as ex:  # pylint: disable=broad-except
                 raise Exception(ex) from ex
 
-            LOGGER.info("\tConnected via P4Runtime version %s",
-                        get_api_version())
+            LOGGER.info("\tConnected via P4Runtime")
             self.__started.set()
 
             return True
diff --git a/src/device/service/drivers/p4/p4_manager.py b/src/device/service/drivers/p4/p4_manager.py
index 65f8602ea30fa2d8cd06b09655ee4ee63d045a97..178487250ea3a5652690fb39f1631a0133aec4e3 100644
--- a/src/device/service/drivers/p4/p4_manager.py
+++ b/src/device/service/drivers/p4/p4_manager.py
@@ -55,7 +55,7 @@ LOGGER = logging.getLogger(__name__)
 CONTEXT = Context()
 
-# Global P4Runtime client
-CLIENT = None
+# Global P4Runtime clients, one per managed device endpoint (keyed by IP address and port)
+CLIENTS = {}
 
 # Constant P4 entities
 KEY_TABLE = "table"
@@ -76,25 +76,6 @@ def get_context():
     """
     return CONTEXT
 
-
-def get_client():
-    """
-    Return P4 client.
-
-    :return: P4Runtime client object
-    """
-    return CLIENT
-
-
-def get_api_version():
-    """
-    Get the supported P4Runtime API version.
-
-    :return: API version
-    """
-    return CLIENT.api_version()
-
-
 def get_table_type(table):
     """
     Assess the type of P4 table based upon the matching scheme.
@@ -136,171 +117,28 @@ def match_type_to_str(match_type):
     return None
 
 
-def insert_table_entry_exact(
-        table_name, match_map, action_name, action_params, metadata,
-        cnt_pkt=-1, cnt_byte=-1):
-    """
-    Insert an entry into an exact match table.
-
-    :param table_name: P4 table name
-    :param match_map: Map of match operations
-    :param action_name: Action name
-    :param action_params: Map of action parameters
-    :param metadata: table metadata
-    :param cnt_pkt: packet count
-    :param cnt_byte: byte count
-    :return: inserted entry
-    """
-    assert match_map, "Table entry without match operations is not accepted"
-    assert action_name, "Table entry without action is not accepted"
-
-    table_entry = TableEntry(table_name)(action=action_name)
-
-    for match_k, match_v in match_map.items():
-        table_entry.match[match_k] = match_v
-
-    for action_k, action_v in action_params.items():
-        table_entry.action[action_k] = action_v
-
-    if metadata:
-        table_entry.metadata = metadata
-
-    if cnt_pkt > 0:
-        table_entry.counter_data.packet_count = cnt_pkt
-
-    if cnt_byte > 0:
-        table_entry.counter_data.byte_count = cnt_byte
-
-    ex_msg = ""
-    try:
-        table_entry.insert()
-        LOGGER.info("Inserted exact table entry: %s", table_entry)
-    except (P4RuntimeException, P4RuntimeWriteException) as ex:
-        raise P4RuntimeException from ex
-
-    # Table entry exists, needs to be modified
-    if "ALREADY_EXISTS" in ex_msg:
-        table_entry.modify()
-        LOGGER.info("Updated exact table entry: %s", table_entry)
-
-    return table_entry
-
-
-def insert_table_entry_ternary(
-        table_name, match_map, action_name, action_params, metadata,
-        priority, cnt_pkt=-1, cnt_byte=-1):
-    """
-    Insert an entry into a ternary match table.
-
-    :param table_name: P4 table name
-    :param match_map: Map of match operations
-    :param action_name: Action name
-    :param action_params: Map of action parameters
-    :param metadata: table metadata
-    :param priority: entry priority
-    :param cnt_pkt: packet count
-    :param cnt_byte: byte count
-    :return: inserted entry
-    """
-    assert match_map, "Table entry without match operations is not accepted"
-    assert action_name, "Table entry without action is not accepted"
-
-    table_entry = TableEntry(table_name)(action=action_name)
-
-    for match_k, match_v in match_map.items():
-        table_entry.match[match_k] = match_v
-
-    for action_k, action_v in action_params.items():
-        table_entry.action[action_k] = action_v
-
-    table_entry.priority = priority
-
-    if metadata:
-        table_entry.metadata = metadata
-
-    if cnt_pkt > 0:
-        table_entry.counter_data.packet_count = cnt_pkt
-
-    if cnt_byte > 0:
-        table_entry.counter_data.byte_count = cnt_byte
-
-    ex_msg = ""
-    try:
-        table_entry.insert()
-        LOGGER.info("Inserted ternary table entry: %s", table_entry)
-    except (P4RuntimeException, P4RuntimeWriteException) as ex:
-        raise P4RuntimeException from ex
-
-    # Table entry exists, needs to be modified
-    if "ALREADY_EXISTS" in ex_msg:
-        table_entry.modify()
-        LOGGER.info("Updated ternary table entry: %s", table_entry)
-
-    return table_entry
-
-
-def insert_table_entry_range(
-        table_name, match_map, action_name, action_params, metadata,
-        priority, cnt_pkt=-1, cnt_byte=-1):  # pylint: disable=unused-argument
-    """
-    Insert an entry into a range match table.
-
-    :param table_name: P4 table name
-    :param match_map: Map of match operations
-    :param action_name: Action name
-    :param action_params: Map of action parameters
-    :param metadata: table metadata
-    :param priority: entry priority
-    :param cnt_pkt: packet count
-    :param cnt_byte: byte count
-    :return: inserted entry
-    """
-    assert match_map, "Table entry without match operations is not accepted"
-    assert action_name, "Table entry without action is not accepted"
-
-    raise NotImplementedError(
-        "Range-based table insertion not implemented yet")
-
-
-def insert_table_entry_optional(
-        table_name, match_map, action_name, action_params, metadata,
-        priority, cnt_pkt=-1, cnt_byte=-1):  # pylint: disable=unused-argument
-    """
-    Insert an entry into an optional match table.
-
-    :param table_name: P4 table name
-    :param match_map: Map of match operations
-    :param action_name: Action name
-    :param action_params: Map of action parameters
-    :param metadata: table metadata
-    :param priority: entry priority
-    :param cnt_pkt: packet count
-    :param cnt_byte: byte count
-    :return: inserted entry
-    """
-    assert match_map, "Table entry without match operations is not accepted"
-    assert action_name, "Table entry without action is not accepted"
-
-    raise NotImplementedError(
-        "Optional-based table insertion not implemented yet")
-
 
 class P4Manager:
     """
     Class to manage the runtime entries of a P4 pipeline.
     """
+    local_client = None
+    key_id = None
 
     def __init__(self, device_id: int, ip_address: str, port: int,
                  election_id: tuple, role_name=None, ssl_options=None):
-        global CLIENT
+        global CLIENTS
 
         self.__id = device_id
         self.__ip_address = ip_address
         self.__port = int(port)
         self.__endpoint = f"{self.__ip_address}:{self.__port}"
-        CLIENT = P4RuntimeClient(
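+        # Keep one P4Runtime client per device endpoint, keyed by IP address and port.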
+        self.key_id = ip_address + str(port)
+        CLIENTS[self.key_id] = P4RuntimeClient(
             self.__id, self.__endpoint, election_id, role_name, ssl_options)
         self.__p4info = None
+        self.local_client = CLIENTS[self.key_id]
 
         # Internal memory for whitebox management
         # | -> P4 entities
@@ -339,27 +177,27 @@ class P4Manager:
         # Forwarding pipeline is only set iff both files are present
         if p4bin_path and p4info_path:
             try:
-                CLIENT.set_fwd_pipe_config(p4info_path, p4bin_path)
+                self.local_client.set_fwd_pipe_config(p4info_path, p4bin_path)
             except FileNotFoundError as ex:
                 LOGGER.critical(ex)
-                CLIENT.tear_down()
+                self.local_client.tear_down()
                 raise FileNotFoundError(ex) from ex
             except P4RuntimeException as ex:
                 LOGGER.critical("Error when setting config")
                 LOGGER.critical(ex)
-                CLIENT.tear_down()
+                self.local_client.tear_down()
                 raise P4RuntimeException(ex) from ex
             except Exception as ex:  # pylint: disable=broad-except
                 LOGGER.critical("Error when setting config")
-                CLIENT.tear_down()
+                self.local_client.tear_down()
                 raise Exception(ex) from ex
 
         try:
-            self.__p4info = CLIENT.get_p4info()
+            self.__p4info = self.local_client.get_p4info()
         except P4RuntimeException as ex:
             LOGGER.critical("Error when retrieving P4Info")
             LOGGER.critical(ex)
-            CLIENT.tear_down()
+            self.local_client.tear_down()
             raise P4RuntimeException(ex) from ex
 
         CONTEXT.set_p4info(self.__p4info)
@@ -375,14 +213,15 @@ class P4Manager:
 
         :return: void
         """
-        global CLIENT
+        global CLIENTS
 
         # gRPC client must already be instantiated
-        assert CLIENT
+        assert self.local_client
 
         # Trigger connection tear down with the P4Runtime server
-        CLIENT.tear_down()
-        CLIENT = None
+        self.local_client.tear_down()
+        # Remove client entry from global dictionary
+        CLIENTS.pop(self.key_id)
         self.__clear()
         LOGGER.info("P4Runtime manager stopped")
 
@@ -723,7 +562,7 @@ class P4Manager:
 
         try:
             for count, table_entry in enumerate(
-                    TableEntry(table_name)(action=action_name).read()):
+                    TableEntry(self.local_client, table_name)(action=action_name).read()):
                 LOGGER.debug(
                     "Table %s - Entry %d\n%s", table_name, count, table_entry)
                 self.table_entries[table_name].append(table_entry)
@@ -856,6 +695,158 @@ class P4Manager:
             )
         return None
 
+    def insert_table_entry_exact(self,
+            table_name, match_map, action_name, action_params, metadata,
+            cnt_pkt=-1, cnt_byte=-1):
+        """
+        Insert an entry into an exact match table.
+    
+        :param table_name: P4 table name
+        :param match_map: Map of match operations
+        :param action_name: Action name
+        :param action_params: Map of action parameters
+        :param metadata: table metadata
+        :param cnt_pkt: packet count
+        :param cnt_byte: byte count
+        :return: inserted entry
+        """
+        assert match_map, "Table entry without match operations is not accepted"
+        assert action_name, "Table entry without action is not accepted"
+    
+        table_entry = TableEntry(self.local_client, table_name)(action=action_name)
+    
+        for match_k, match_v in match_map.items():
+            table_entry.match[match_k] = match_v
+    
+        for action_k, action_v in action_params.items():
+            table_entry.action[action_k] = action_v
+    
+        if metadata:
+            table_entry.metadata = metadata
+    
+        if cnt_pkt > 0:
+            table_entry.counter_data.packet_count = cnt_pkt
+    
+        if cnt_byte > 0:
+            table_entry.counter_data.byte_count = cnt_byte
+    
+        ex_msg = ""
+        try:
+            table_entry.insert()
+            LOGGER.info("Inserted exact table entry: %s", table_entry)
+        except (P4RuntimeException, P4RuntimeWriteException) as ex:
+            ex_msg = str(ex)
+            if "ALREADY_EXISTS" not in ex_msg:
+                raise P4RuntimeException from ex
+
+        # Table entry exists, needs to be modified
+        if "ALREADY_EXISTS" in ex_msg:
+            table_entry.modify()
+            LOGGER.info("Updated exact table entry: %s", table_entry)
+    
+        return table_entry
+    
+    
+    def insert_table_entry_ternary(self,
+            table_name, match_map, action_name, action_params, metadata,
+            priority, cnt_pkt=-1, cnt_byte=-1):
+        """
+        Insert an entry into a ternary match table.
+    
+        :param table_name: P4 table name
+        :param match_map: Map of match operations
+        :param action_name: Action name
+        :param action_params: Map of action parameters
+        :param metadata: table metadata
+        :param priority: entry priority
+        :param cnt_pkt: packet count
+        :param cnt_byte: byte count
+        :return: inserted entry
+        """
+        assert match_map, "Table entry without match operations is not accepted"
+        assert action_name, "Table entry without action is not accepted"
+    
+        table_entry = TableEntry(self.local_client, table_name)(action=action_name)
+    
+        for match_k, match_v in match_map.items():
+            table_entry.match[match_k] = match_v
+    
+        for action_k, action_v in action_params.items():
+            table_entry.action[action_k] = action_v
+    
+        table_entry.priority = priority
+    
+        if metadata:
+            table_entry.metadata = metadata
+    
+        if cnt_pkt > 0:
+            table_entry.counter_data.packet_count = cnt_pkt
+    
+        if cnt_byte > 0:
+            table_entry.counter_data.byte_count = cnt_byte
+    
+        ex_msg = ""
+        try:
+            table_entry.insert()
+            LOGGER.info("Inserted ternary table entry: %s", table_entry)
+        except (P4RuntimeException, P4RuntimeWriteException) as ex:
+            ex_msg = str(ex)
+            if "ALREADY_EXISTS" not in ex_msg:
+                raise P4RuntimeException from ex
+
+        # Table entry exists, needs to be modified
+        if "ALREADY_EXISTS" in ex_msg:
+            table_entry.modify()
+            LOGGER.info("Updated ternary table entry: %s", table_entry)
+    
+        return table_entry
+    
+    
+    def insert_table_entry_range(self,
+            table_name, match_map, action_name, action_params, metadata,
+            priority, cnt_pkt=-1, cnt_byte=-1):  # pylint: disable=unused-argument
+        """
+        Insert an entry into a range match table.
+    
+        :param table_name: P4 table name
+        :param match_map: Map of match operations
+        :param action_name: Action name
+        :param action_params: Map of action parameters
+        :param metadata: table metadata
+        :param priority: entry priority
+        :param cnt_pkt: packet count
+        :param cnt_byte: byte count
+        :return: inserted entry
+        """
+        assert match_map, "Table entry without match operations is not accepted"
+        assert action_name, "Table entry without action is not accepted"
+    
+        raise NotImplementedError(
+            "Range-based table insertion not implemented yet")
+    
+    
+    def insert_table_entry_optional(self,
+            table_name, match_map, action_name, action_params, metadata,
+            priority, cnt_pkt=-1, cnt_byte=-1):  # pylint: disable=unused-argument
+        """
+        Insert an entry into an optional match table.
+    
+        :param table_name: P4 table name
+        :param match_map: Map of match operations
+        :param action_name: Action name
+        :param action_params: Map of action parameters
+        :param metadata: table metadata
+        :param priority: entry priority
+        :param cnt_pkt: packet count
+        :param cnt_byte: byte count
+        :return: inserted entry
+        """
+        assert match_map, "Table entry without match operations is not accepted"
+        assert action_name, "Table entry without action is not accepted"
+    
+        raise NotImplementedError(
+            "Optional-based table insertion not implemented yet")
+
     def insert_table_entry(self, table_name,
                            match_map, action_name, action_params,
                            priority, metadata=None, cnt_pkt=-1, cnt_byte=-1):
@@ -889,26 +876,26 @@ class P4Manager:
 
         # Exact match is supported
         if get_table_type(table) == p4info_pb2.MatchField.EXACT:
-            return insert_table_entry_exact(
+            return self.insert_table_entry_exact(
                 table_name, match_map, action_name, action_params, metadata,
                 cnt_pkt, cnt_byte)
 
         # Ternary and LPM matches are supported
         if get_table_type(table) in \
                 [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]:
-            return insert_table_entry_ternary(
+            return self.insert_table_entry_ternary(
                 table_name, match_map, action_name, action_params, metadata,
                 priority, cnt_pkt, cnt_byte)
 
         # TODO: Cover RANGE match  # pylint: disable=W0511
         if get_table_type(table) == p4info_pb2.MatchField.RANGE:
-            return insert_table_entry_range(
+            return self.insert_table_entry_range(
                 table_name, match_map, action_name, action_params, metadata,
                 priority, cnt_pkt, cnt_byte)
 
         # TODO: Cover OPTIONAL match  # pylint: disable=W0511
         if get_table_type(table) == p4info_pb2.MatchField.OPTIONAL:
-            return insert_table_entry_optional(
+            return self.insert_table_entry_optional(
                 table_name, match_map, action_name, action_params, metadata,
                 priority, cnt_pkt, cnt_byte)
 
@@ -935,7 +922,7 @@ class P4Manager:
             LOGGER.error(msg)
             raise UserError(msg)
 
-        table_entry = TableEntry(table_name)(action=action_name)
+        table_entry = TableEntry(self.local_client, table_name)(action=action_name)
 
         for match_k, match_v in match_map.items():
             table_entry.match[match_k] = match_v
@@ -979,7 +966,7 @@ class P4Manager:
             LOGGER.error(msg)
             raise UserError(msg)
 
-        TableEntry(table_name).read(function=lambda x: x.delete())
+        TableEntry(self.local_client, table_name).read(function=lambda x: x.delete())
         LOGGER.info("Deleted all entries from table: %s", table_name)
 
     def print_table_entries_spec(self, table_name):
@@ -1179,7 +1166,7 @@ class P4Manager:
         self.counter_entries[cnt_name] = []
 
         try:
-            for count, cnt_entry in enumerate(CounterEntry(cnt_name).read()):
+            for count, cnt_entry in enumerate(CounterEntry(self.local_client, cnt_name).read()):
                 LOGGER.debug(
                     "Counter %s - Entry %d\n%s", cnt_name, count, cnt_entry)
                 self.counter_entries[cnt_name].append(cnt_entry)
@@ -1298,7 +1285,7 @@ class P4Manager:
         assert cnt, \
             "P4 pipeline does not implement counter " + cnt_name
 
-        cnt_entry = CounterEntry(cnt_name)
+        cnt_entry = CounterEntry(self.local_client, cnt_name)
 
         if index:
             cnt_entry.index = index
@@ -1325,7 +1312,7 @@ class P4Manager:
         assert cnt, \
             "P4 pipeline does not implement counter " + cnt_name
 
-        cnt_entry = CounterEntry(cnt_name)
+        cnt_entry = CounterEntry(self.local_client, cnt_name)
         cnt_entry.clear_data()
         LOGGER.info("Cleared data of counter entry: %s", cnt_entry)
 
@@ -1394,7 +1381,7 @@ class P4Manager:
 
         try:
             for count, d_cnt_entry in enumerate(
-                    DirectCounterEntry(d_cnt_name).read()):
+                    DirectCounterEntry(self.local_client, d_cnt_name).read()):
                 LOGGER.debug(
                     "Direct counter %s - Entry %d\n%s",
                     d_cnt_name, count, d_cnt_entry)
@@ -1530,7 +1517,7 @@ class P4Manager:
         assert match_map,\
             "Direct counter entry without match operations is not accepted"
 
-        d_cnt_entry = DirectCounterEntry(d_cnt_name)
+        d_cnt_entry = DirectCounterEntry(self.local_client, d_cnt_name)
 
         for match_k, match_v in match_map.items():
             d_cnt_entry.table_entry.match[match_k] = match_v
@@ -1559,7 +1546,7 @@ class P4Manager:
         assert d_cnt, \
             "P4 pipeline does not implement direct counter " + d_cnt_name
 
-        d_cnt_entry = DirectCounterEntry(d_cnt_name)
+        d_cnt_entry = DirectCounterEntry(self.local_client, d_cnt_name)
         d_cnt_entry.clear_data()
         LOGGER.info("Cleared direct counter entry: %s", d_cnt_entry)
 
@@ -1627,7 +1614,7 @@ class P4Manager:
         self.meter_entries[meter_name] = []
 
         try:
-            for count, meter_entry in enumerate(MeterEntry(meter_name).read()):
+            for count, meter_entry in enumerate(MeterEntry(self.local_client, meter_name).read()):
                 LOGGER.debug(
                     "Meter %s - Entry %d\n%s", meter_name, count, meter_entry)
                 self.meter_entries[meter_name].append(meter_entry)
@@ -1756,7 +1743,7 @@ class P4Manager:
         assert meter, \
             "P4 pipeline does not implement meter " + meter_name
 
-        meter_entry = MeterEntry(meter_name)
+        meter_entry = MeterEntry(self.local_client, meter_name)
 
         if index:
             meter_entry.index = index
@@ -1789,7 +1776,7 @@ class P4Manager:
         assert meter, \
             "P4 pipeline does not implement meter " + meter_name
 
-        meter_entry = MeterEntry(meter_name)
+        meter_entry = MeterEntry(self.local_client, meter_name)
         meter_entry.clear_config()
         LOGGER.info("Cleared meter entry: %s", meter_entry)
 
@@ -1858,7 +1845,7 @@ class P4Manager:
 
         try:
             for count, d_meter_entry in enumerate(
-                    MeterEntry(d_meter_name).read()):
+                    MeterEntry(self.local_client, d_meter_name).read()):
                 LOGGER.debug(
                     "Direct meter %s - Entry %d\n%s",
                     d_meter_name, count, d_meter_entry)
@@ -1998,7 +1985,7 @@ class P4Manager:
         assert match_map,\
             "Direct meter entry without match operations is not accepted"
 
-        d_meter_entry = DirectMeterEntry(d_meter_name)
+        d_meter_entry = DirectMeterEntry(self.local_client, d_meter_name)
 
         for match_k, match_v in match_map.items():
             d_meter_entry.table_entry.match[match_k] = match_v
@@ -2031,7 +2018,7 @@ class P4Manager:
         assert d_meter, \
             "P4 pipeline does not implement direct meter " + d_meter_name
 
-        d_meter_entry = DirectMeterEntry(d_meter_name)
+        d_meter_entry = DirectMeterEntry(self.local_client, d_meter_name)
         d_meter_entry.clear_config()
         LOGGER.info("Cleared direct meter entry: %s", d_meter_entry)
 
@@ -2100,7 +2087,7 @@ class P4Manager:
 
         try:
             for count, ap_entry in enumerate(
-                    ActionProfileMember(ap_name).read()):
+                    ActionProfileMember(self.local_client, ap_name).read()):
                 LOGGER.debug(
                     "Action profile member %s - Entry %d\n%s",
                     ap_name, count, ap_entry)
@@ -2230,7 +2217,7 @@ class P4Manager:
         assert act_p, \
             "P4 pipeline does not implement action profile " + ap_name
 
-        ap_member_entry = ActionProfileMember(ap_name)(
+        ap_member_entry = ActionProfileMember(self.local_client, ap_name)(
             member_id=member_id, action=action_name)
 
         for action_k, action_v in action_params.items():
@@ -2267,7 +2254,7 @@ class P4Manager:
         assert act_p, \
             "P4 pipeline does not implement action profile " + ap_name
 
-        ap_member_entry = ActionProfileMember(ap_name)(
+        ap_member_entry = ActionProfileMember(self.local_client, ap_name)(
             member_id=member_id, action=action_name)
         ap_member_entry.delete()
         LOGGER.info("Deleted action profile member entry: %s", ap_member_entry)
@@ -2364,7 +2351,7 @@ class P4Manager:
 
         try:
             for count, ap_entry in enumerate(
-                    ActionProfileGroup(ap_name).read()):
+                    ActionProfileGroup(self.local_client, ap_name).read()):
                 LOGGER.debug("Action profile group %s - Entry %d\n%s",
                              ap_name, count, ap_entry)
                 self.action_profile_groups[ap_name].append(ap_entry)
@@ -2483,7 +2470,7 @@ class P4Manager:
         assert ap, \
             "P4 pipeline does not implement action profile " + ap_name
 
-        ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id)
+        ap_group_entry = ActionProfileGroup(self.local_client, ap_name)(group_id=group_id)
 
         if members:
             for m in members:
@@ -2519,7 +2506,7 @@ class P4Manager:
         assert ap, \
             "P4 pipeline does not implement action profile " + ap_name
 
-        ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id)
+        ap_group_entry = ActionProfileGroup(self.local_client, ap_name)(group_id=group_id)
         ap_group_entry.delete()
         LOGGER.info("Deleted action profile group entry: %s", ap_group_entry)
 
@@ -2537,7 +2524,7 @@ class P4Manager:
         assert ap, \
             "P4 pipeline does not implement action profile " + ap_name
 
-        ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id)
+        ap_group_entry = ActionProfileGroup(self.local_client, ap_name)(group_id=group_id)
         ap_group_entry.clear()
         LOGGER.info("Cleared action profile group entry: %s", ap_group_entry)
 
@@ -2631,7 +2618,7 @@ class P4Manager:
         self.multicast_groups[group_id] = None
 
         try:
-            mcast_group = MulticastGroupEntry(group_id).read()
+            mcast_group = MulticastGroupEntry(self.local_client, group_id).read()
             LOGGER.debug("Multicast group %d\n%s", group_id, mcast_group)
             self.multicast_groups[group_id] = mcast_group
             return self.multicast_groups[group_id]
@@ -2724,7 +2711,7 @@ class P4Manager:
         assert ports, \
             "No multicast group ports are provided"
 
-        mcast_group = MulticastGroupEntry(group_id)
+        mcast_group = MulticastGroupEntry(self.local_client, group_id)
         for p in ports:
             mcast_group.add(p, 1)
 
@@ -2756,7 +2743,7 @@ class P4Manager:
         assert group_id > 0, \
             "Multicast group " + group_id + " must be > 0"
 
-        mcast_group = MulticastGroupEntry(group_id)
+        mcast_group = MulticastGroupEntry(self.local_client, group_id)
         mcast_group.delete()
 
         if group_id in self.multicast_groups:
@@ -2772,7 +2759,7 @@ class P4Manager:
 
         :return: void
         """
-        for mcast_group in MulticastGroupEntry().read():
+        for mcast_group in MulticastGroupEntry(self.local_client).read():
             gid = mcast_group.group_id
             mcast_group.delete()
             del self.multicast_groups[gid]
@@ -2828,7 +2815,7 @@ class P4Manager:
         self.clone_session_entries[session_id] = None
 
         try:
-            session = CloneSessionEntry(session_id).read()
+            session = CloneSessionEntry(self.local_client, session_id).read()
             LOGGER.debug("Clone session %d\n%s", session_id, session)
             self.clone_session_entries[session_id] = session
             return self.clone_session_entries[session_id]
@@ -2923,7 +2910,7 @@ class P4Manager:
         assert ports, \
             "No clone session ports are provided"
 
-        session = CloneSessionEntry(session_id)
+        session = CloneSessionEntry(self.local_client, session_id)
         for p in ports:
             session.add(p, 1)
 
@@ -2955,7 +2942,7 @@ class P4Manager:
         assert session_id > 0, \
             "Clone session " + session_id + " must be > 0"
 
-        session = CloneSessionEntry(session_id)
+        session = CloneSessionEntry(self.local_client, session_id)
         session.delete()
 
         if session_id in self.clone_session_entries:
@@ -2971,7 +2958,7 @@ class P4Manager:
 
         :return: void
         """
-        for e in CloneSessionEntry().read():
+        for e in CloneSessionEntry(self.local_client).read():
             sid = e.session_id
             e.delete()
             del self.clone_session_entries[sid]
@@ -3052,7 +3039,7 @@ class P4Manager:
                            "No controller packet metadata in the pipeline\n")
             return None
 
-        packet_in = PacketOut()
+        packet_in = PacketIn(self.local_client)
         packet_in.payload = payload
         if metadata:
             for name, value in metadata.items():
@@ -3090,7 +3077,7 @@ class P4Manager:
         _t = Thread(target=_sniff_packet, args=(captured_packet,))
         _t.start()
         # P4Runtime client sends the packet to the switch
-        CLIENT.stream_in_q["packet"].put(packet_in)
+        self.local_client.stream_in_q["packet"].put(packet_in)
         _t.join()
         LOGGER.info("Packet-in sent: %s", packet_in)
 
@@ -3111,7 +3098,7 @@ class P4Manager:
                            "No controller packet metadata in the pipeline\n")
             return None
 
-        packet_out = PacketOut()
+        packet_out = PacketOut(self.local_client)
         packet_out.payload = payload
         if metadata:
             for name, value in metadata.items():
@@ -3654,12 +3641,14 @@ class _EntityBase:
     """
     Basic entity.
     """
+    local_client = None
 
-    def __init__(self, entity_type, p4runtime_cls, modify_only=False):
+    def __init__(self, p4_client, entity_type, p4runtime_cls, modify_only=False):
         self._init = False
         self._entity_type = entity_type
         self._entry = p4runtime_cls()
         self._modify_only = modify_only
+        self.local_client = p4_client
 
     def __dir__(self):
         d = ["msg", "read"]
@@ -3696,7 +3685,7 @@ class _EntityBase:
         update = p4runtime_pb2.Update()
         update.type = type_
         getattr(update.entity, self._entity_type.name).CopyFrom(self._entry)
-        CLIENT.write_update(update)
+        self.local_client.write_update(update)
 
     def insert(self):
         """
@@ -3747,7 +3736,7 @@ class _EntityBase:
         entity = p4runtime_pb2.Entity()
         getattr(entity, self._entity_type.name).CopyFrom(self._entry)
 
-        iterator = CLIENT.read_one(entity)
+        iterator = self.local_client.read_one(entity)
 
         # Cannot use a (simpler) generator here as we need to
         # decorate __next__ with @parse_p4runtime_error.
@@ -3794,9 +3783,9 @@ class _P4EntityBase(_EntityBase):
     Basic P4 entity.
     """
 
-    def __init__(self, p4_type, entity_type, p4runtime_cls, name=None,
+    def __init__(self, p4_client, p4_type, entity_type, p4runtime_cls, name=None,
                  modify_only=False):
-        super().__init__(entity_type, p4runtime_cls, modify_only)
+        super().__init__(p4_client, entity_type, p4runtime_cls, modify_only)
         self._p4_type = p4_type
         if name is None:
             raise UserError(
@@ -3825,8 +3814,8 @@ class ActionProfileMember(_P4EntityBase):
     P4 action profile member.
     """
 
-    def __init__(self, action_profile_name=None):
-        super().__init__(
+    def __init__(self, p4_client, action_profile_name=None):
+        super().__init__(p4_client,
             P4Type.action_profile, P4RuntimeEntity.action_profile_member,
             p4runtime_pb2.ActionProfileMember, action_profile_name)
         self.member_id = 0
@@ -3991,8 +3980,8 @@ class ActionProfileGroup(_P4EntityBase):
     P4 action profile group.
     """
 
-    def __init__(self, action_profile_name=None):
-        super().__init__(
+    def __init__(self, p4_client, action_profile_name=None):
+        super().__init__(p4_client,
             P4Type.action_profile, P4RuntimeEntity.action_profile_group,
             p4runtime_pb2.ActionProfileGroup, action_profile_name)
         self.group_id = 0
@@ -4554,8 +4543,8 @@ class TableEntry(_P4EntityBase):
             "oneshot": cls._ActionSpecType.ONESHOT,
         }.get(name, None)
 
-    def __init__(self, table_name=None):
-        super().__init__(
+    def __init__(self, p4_client, table_name=None):
+        super().__init__(p4_client,
             P4Type.table, P4RuntimeEntity.table_entry,
             p4runtime_pb2.TableEntry, table_name)
         self.match = MatchKey(table_name, self._info.match_fields)
@@ -4996,8 +4985,8 @@ class _CounterEntryBase(_P4EntityBase):
     Basic P4 counter entry.
     """
 
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
+    def __init__(self, p4_client, *args, **kwargs):
+        super().__init__(p4_client, *args, **kwargs)
         self._counter_type = self._info.spec.unit
         self.packet_count = -1
         self.byte_count = -1
@@ -5065,8 +5054,8 @@ class CounterEntry(_CounterEntryBase):
     P4 counter entry.
     """
 
-    def __init__(self, counter_name=None):
-        super().__init__(
+    def __init__(self, p4_client, counter_name=None):
+        super().__init__(p4_client,
             P4Type.counter, P4RuntimeEntity.counter_entry,
             p4runtime_pb2.CounterEntry, counter_name,
             modify_only=True)
@@ -5126,10 +5115,11 @@ To write to the counter, use <self>.modify
 class DirectCounterEntry(_CounterEntryBase):
     """
     Direct P4 counter entry.
     """
+    local_client = None
 
-    def __init__(self, direct_counter_name=None):
-        super().__init__(
+    def __init__(self, p4_client, direct_counter_name=None):
+        super().__init__(p4_client,
             P4Type.direct_counter, P4RuntimeEntity.direct_counter_entry,
             p4runtime_pb2.DirectCounterEntry, direct_counter_name,
             modify_only=True)
@@ -5140,7 +5130,8 @@ class DirectCounterEntry(_CounterEntryBase):
         except KeyError as ex:
             raise InvalidP4InfoError(f"direct_table_id {self._direct_table_id} "
                                      f"is not a valid table id") from ex
-        self._table_entry = TableEntry(self._direct_table_name)
+        self._table_entry = TableEntry(p4_client, self._direct_table_name)
+        self.local_client = p4_client
         self.__doc__ = f"""
 An entry for direct counter '{direct_counter_name}'
 
@@ -5167,7 +5158,7 @@ To write to the counter, use <self>.modify
             raise UserError("Direct counters are not index-based")
         if name == "table_entry":
             if value is None:
-                self._table_entry = TableEntry(self._direct_table_name)
+                self._table_entry = TableEntry(self.local_client, self._direct_table_name)
                 return
             if not isinstance(value, TableEntry):
                 raise UserError("table_entry must be an instance of TableEntry")
@@ -5221,7 +5212,7 @@ class _MeterEntryBase(_P4EntityBase):
     Basic P4 meter entry.
     """
 
-    def __init__(self, *args, **kwargs):
+    def __init__(self, p4_client, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self._meter_type = self._info.spec.unit
         self.index = -1
@@ -5291,8 +5282,8 @@ class MeterEntry(_MeterEntryBase):
     P4 meter entry.
     """
 
-    def __init__(self, meter_name=None):
-        super().__init__(
+    def __init__(self, p4_client, meter_name=None):
+        super().__init__(p4_client,
             P4Type.meter, P4RuntimeEntity.meter_entry,
             p4runtime_pb2.MeterEntry, meter_name,
             modify_only=True)
@@ -5356,9 +5347,10 @@ class DirectMeterEntry(_MeterEntryBase):
     """
     Direct P4 meter entry.
     """
+    local_client = None
 
-    def __init__(self, direct_meter_name=None):
-        super().__init__(
+    def __init__(self, p4_client, direct_meter_name=None):
+        super().__init__(p4_client,
             P4Type.direct_meter, P4RuntimeEntity.direct_meter_entry,
             p4runtime_pb2.DirectMeterEntry, direct_meter_name,
             modify_only=True)
@@ -5369,7 +5361,8 @@ class DirectMeterEntry(_MeterEntryBase):
         except KeyError as ex:
             raise InvalidP4InfoError(f"direct_table_id {self._direct_table_id} "
                                      f"is not a valid table id") from ex
-        self._table_entry = TableEntry(self._direct_table_name)
+        self._table_entry = TableEntry(p4_client, self._direct_table_name)
+        self.local_client = p4_client
         self.__doc__ = f"""
 An entry for direct meter '{direct_meter_name}'
 
@@ -5399,7 +5392,7 @@ To write to the meter, use <self>.modify
             raise UserError("Direct meters are not index-based")
         if name == "table_entry":
             if value is None:
-                self._table_entry = TableEntry(self._direct_table_name)
+                self._table_entry = TableEntry(self.local_client, self._direct_table_name)
                 return
             if not isinstance(value, TableEntry):
                 raise UserError("table_entry must be an instance of TableEntry")
@@ -5531,8 +5524,8 @@ class MulticastGroupEntry(_EntityBase):
     P4 multicast group entry.
     """
 
-    def __init__(self, group_id=0):
-        super().__init__(
+    def __init__(self, p4_client, group_id=0):
+        super().__init__(p4_client,
             P4RuntimeEntity.packet_replication_engine_entry,
             p4runtime_pb2.PacketReplicationEngineEntry)
         self.group_id = group_id
@@ -5609,8 +5602,8 @@ class CloneSessionEntry(_EntityBase):
     P4 clone session entry.
     """
 
-    def __init__(self, session_id=0):
-        super().__init__(
+    def __init__(self, p4_client, session_id=0):
+        super().__init__(p4_client,
             P4RuntimeEntity.packet_replication_engine_entry,
             p4runtime_pb2.PacketReplicationEngineEntry)
         self.session_id = session_id
@@ -5779,8 +5772,9 @@ class PacketIn():
     """
     P4 packet in.
     """
+    local_client = None
 
-    def __init__(self):
+    def __init__(self, p4_client):
         ctrl_pkt_md = P4Objects(P4Type.controller_packet_metadata)
         self.md_info_list = {}
         if "packet_in" in ctrl_pkt_md:
@@ -5788,10 +5782,11 @@ class PacketIn():
             for md_info in self.p4_info.metadata:
                 self.md_info_list[md_info.name] = md_info
         self.packet_in_queue = queue.Queue()
+        self.local_client = p4_client
 
         def _packet_in_recv_func(packet_in_queue):
             while True:
-                msg = CLIENT.get_stream_packet("packet", timeout=None)
+                msg = self.local_client.get_stream_packet("packet", timeout=None)
                 if not msg:
                     break
                 packet_in_queue.put(msg)
@@ -5857,8 +5852,9 @@ class PacketOut:
     """
     P4 packet out.
     """
+    local_client = None
 
-    def __init__(self, payload=b'', **kwargs):
+    def __init__(self, p4_client, payload=b'', **kwargs):
 
         self.p4_info = P4Objects(P4Type.controller_packet_metadata)[
             "packet_out"]
@@ -5868,6 +5864,7 @@ class PacketOut:
         if kwargs:
             for key, value in kwargs.items():
                 self.metadata[key] = value
+        self.local_client = p4_client
 
     def _update_msg(self):
         self._entry = p4runtime_pb2.PacketOut()
@@ -5897,7 +5894,7 @@ class PacketOut:
         self._update_msg()
         msg = p4runtime_pb2.StreamMessageRequest()
         msg.packet.CopyFrom(self._entry)
-        CLIENT.stream_out_q.put(msg)
+        self.local_client.stream_out_q.put(msg)
 
     def str(self):
         """
@@ -5913,13 +5910,16 @@ class IdleTimeoutNotification():
     """
     P4 idle timeout notification.
     """
+
+    local_client = None
 
-    def __init__(self):
+    def __init__(self, p4_client):
         self.notification_queue = queue.Queue()
+        self.local_client = p4_client
 
         def _notification_recv_func(notification_queue):
             while True:
-                msg = CLIENT.get_stream_packet("idle_timeout_notification",
+                msg = self.local_client.get_stream_packet("idle_timeout_notification",
                                                timeout=None)
                 if not msg:
                     break
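
This hunk completes the pattern used throughout the P4 refactor: the module-level CLIENT singleton is replaced by a client handle injected into each entity, so one process can drive several switches concurrently. A minimal, self-contained sketch of the wiring; the class and attribute names below are illustrative stand-ins, not the real p4runtime library:

```python
# Illustrative sketch of per-instance client injection (hypothetical classes).
import queue

class FakeP4Client:
    """Stands in for one switch's P4Runtime client connection."""
    def __init__(self, name):
        self.name = name
        self.stream_out_q = queue.Queue()

class PacketOutSketch:
    local_client = None  # per-instance handle instead of a global CLIENT

    def __init__(self, p4_client, payload=b''):
        self.local_client = p4_client  # injected, so switches do not share state
        self.payload = payload

    def send(self):
        self.local_client.stream_out_q.put(self.payload)

sw1, sw2 = FakeP4Client('SW1'), FakeP4Client('SW2')
PacketOutSketch(sw1, b'ping').send()  # lands only on SW1's stream queue
assert sw2.stream_out_q.empty()
```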
diff --git a/src/monitoring/client/MonitoringClient.py b/src/monitoring/client/MonitoringClient.py
index 73607a081cd57e7c62b9c4e2c5e487868e72d189..5641b9cf3236c5fecfa5c6efe3a03b899c342ea5 100644
--- a/src/monitoring/client/MonitoringClient.py
+++ b/src/monitoring/client/MonitoringClient.py
@@ -22,7 +22,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.proto.context_pb2 import Empty
 from common.proto.monitoring_pb2 import Kpi, KpiDescriptor, KpiId, MonitorKpiRequest, \
     KpiDescriptorList, KpiQuery, KpiList, SubsDescriptor, SubscriptionID, SubsList, \
-    SubsResponse, AlarmDescriptor, AlarmID, AlarmList, AlarmResponse, AlarmSubscription
+    SubsResponse, AlarmDescriptor, AlarmID, AlarmList, AlarmResponse, AlarmSubscription, RawKpiTable
 from common.proto.monitoring_pb2_grpc import MonitoringServiceStub
 
 LOGGER = logging.getLogger(__name__)
@@ -93,7 +93,7 @@ class MonitoringClient:
         return response
 
     @RETRY_DECORATOR
-    def QueryKpiData(self, request : KpiQuery) -> KpiList:
+    def QueryKpiData(self, request : KpiQuery) -> RawKpiTable:
         LOGGER.debug('QueryKpiData: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.QueryKpiData(request)
         LOGGER.debug('QueryKpiData result: {:s}'.format(grpc_message_to_json_string(response)))
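
Since QueryKpiData now returns a RawKpiTable instead of a KpiList, callers walk a nested structure: one RawKpiList per requested KPI, each holding raw (timestamp, value) points. A hedged usage sketch; the field names follow the RawKpiTable handling in the servicer changes further down this patch:

```python
# Hypothetical client-side traversal of the new QueryKpiData return type.
def print_raw_kpi_table(monitoring_client, kpi_query):
    table = monitoring_client.QueryKpiData(kpi_query)  # RawKpiTable
    for raw_kpi_list in table.raw_kpi_lists:           # one entry per KPI id
        kpi_uuid = raw_kpi_list.kpi_id.kpi_id.uuid
        for raw_kpi in raw_kpi_list.raw_kpis:          # raw (timestamp, value) points
            print(kpi_uuid, raw_kpi.timestamp.timestamp, raw_kpi.kpi_value.floatVal)
```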
diff --git a/src/monitoring/service/AlarmManager.py b/src/monitoring/service/AlarmManager.py
index e5ac8915c3728c7894dc70ab901215dd5a7feb41..873a65d2c8041e6378f84d979bb1fd98d4d61d6b 100644
--- a/src/monitoring/service/AlarmManager.py
+++ b/src/monitoring/service/AlarmManager.py
@@ -1,3 +1,4 @@
+import pytz
 from apscheduler.schedulers.background import BackgroundScheduler
 from apscheduler.executors.pool import ProcessPoolExecutor
 from apscheduler.jobstores.base import JobLookupError
@@ -19,10 +20,16 @@ class AlarmManager():
         end_date=None
         if subscription_timeout_s:
             start_timestamp=time.time()
-            start_date=datetime.fromtimestamp(start_timestamp)
-            end_date=datetime.fromtimestamp(start_timestamp+subscription_timeout_s)
-        self.scheduler.add_job(self.metrics_db.get_alarm_data, args=(alarm_queue,kpi_id, kpiMinValue, kpiMaxValue, inRange, includeMinValue, includeMaxValue, subscription_frequency_ms),trigger='interval', seconds=(subscription_frequency_ms/1000), start_date=start_date, end_date=end_date, id=alarm_id)
+            end_timestamp = start_timestamp + subscription_timeout_s
+            start_date = datetime.utcfromtimestamp(start_timestamp).isoformat()
+            end_date = datetime.utcfromtimestamp(end_timestamp).isoformat()
+
+        self.scheduler.add_job(self.metrics_db.get_alarm_data,
+                               args=(alarm_queue, kpi_id, kpiMinValue, kpiMaxValue, inRange, includeMinValue, includeMaxValue, subscription_frequency_ms),
+                               trigger='interval', seconds=(subscription_frequency_ms/1000), start_date=start_date,
+                               end_date=end_date, timezone=pytz.utc, id=str(alarm_id))
         LOGGER.debug(f"Alarm job {alarm_id} succesfully created")
 
     def delete_alarm(self, alarm_id):
         try:
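
The scheduling change above is not cosmetic: naive datetime.fromtimestamp() values are interpreted in the host's local time zone, so a job window computed that way can silently shift against a scheduler that reasons in UTC. A standalone sketch of the corrected pattern; the callback and the 10-second window are made up for illustration:

```python
# UTC-pinned interval job with explicit start/end dates (illustrative).
import time
import pytz
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler

def check_alarm_condition():
    print('checking alarm condition')

scheduler = BackgroundScheduler()
scheduler.start()

start_ts   = time.time()
start_date = datetime.utcfromtimestamp(start_ts).isoformat()       # unambiguous UTC
end_date   = datetime.utcfromtimestamp(start_ts + 10).isoformat()  # 10 s window

scheduler.add_job(check_alarm_condition, trigger='interval', seconds=1,
                  start_date=start_date, end_date=end_date,
                  timezone=pytz.utc, id=str(42))  # job ids must be strings
```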
diff --git a/src/monitoring/service/ManagementDBTools.py b/src/monitoring/service/ManagementDBTools.py
index 2387ddde0ab9eecea6c8fc982ba97a94f1a88c98..2185a3986532ad1b8e629cdcdb66079f23995c8f 100644
--- a/src/monitoring/service/ManagementDBTools.py
+++ b/src/monitoring/service/ManagementDBTools.py
@@ -38,7 +38,10 @@ class ManagementDB():
                     kpi_sample_type INTEGER,
                     device_id INTEGER,
                     endpoint_id INTEGER,
-                    service_id INTEGER
+                    service_id INTEGER,
+                    slice_id INTEGER,
+                    connection_id INTEGER,
+                    monitor_flag INTEGER
                 );
             """)
             LOGGER.debug("KPI table created in the ManagementDB")
@@ -84,13 +87,13 @@ class ManagementDB():
             LOGGER.debug(f"Alarm table cannot be created in the ManagementDB. {e}")
             raise Exception
 
-    def insert_KPI(self,kpi_description,kpi_sample_type,device_id,endpoint_id,service_id):
+    def insert_KPI(self,kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id):
         try:
             c = self.client.cursor()
-            c.execute("SELECT kpi_id FROM kpi WHERE device_id is ? AND kpi_sample_type is ? AND endpoint_id is ? AND service_id is ?",(device_id,kpi_sample_type,endpoint_id,service_id))
+            c.execute("SELECT kpi_id FROM kpi WHERE device_id is ? AND kpi_sample_type is ? AND endpoint_id is ? AND service_id is ? AND slice_id is ? AND connection_id is ?",(device_id,kpi_sample_type,endpoint_id,service_id,slice_id,connection_id))
             data=c.fetchone()
             if data is None:
-                c.execute("INSERT INTO kpi (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id) VALUES (?,?,?,?,?)", (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id))
+                c.execute("INSERT INTO kpi (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id) VALUES (?,?,?,?,?,?,?)", (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id))
                 self.client.commit()
                 kpi_id = c.lastrowid
                 LOGGER.debug(f"KPI {kpi_id} succesfully inserted in the ManagementDB")
@@ -245,4 +248,41 @@ class ManagementDB():
             LOGGER.debug(f"Alarms succesfully retrieved from the ManagementDB")
             return data
         except sqlite3.Error as e:
-            LOGGER.debug(f"Alarms cannot be retrieved from the ManagementDB: {e}")
\ No newline at end of file
+            LOGGER.debug(f"Alarms cannot be retrieved from the ManagementDB: {e}")
+
+    def check_monitoring_flag(self,kpi_id):
+        try:
+            c = self.client.cursor()
+            c.execute("SELECT monitor_flag FROM kpi WHERE kpi_id is ?",(kpi_id,))
+            data=c.fetchone()
+            if data is None:
+                LOGGER.debug(f"KPI {kpi_id} does not exists")
+                return None
+            else:
+                if data[0] == 1:
+                    return True
+                elif data[0] == 0:
+                    return False
+                else:
+                    LOGGER.debug(f"KPI {kpi_id} has an invalid monitor_flag value: {data[0]}")
+                    return None
+        except sqlite3.Error as e:
+            LOGGER.debug(f"KPI {kpi_id} cannot be checked from the ManagementDB: {e}")
+
+
+    def set_monitoring_flag(self,kpi_id,flag):
+        try:
+            c = self.client.cursor()
+            data = c.execute("SELECT * FROM kpi WHERE kpi_id is ?",(kpi_id,)).fetchone()
+            if data is None:
+                LOGGER.debug(f"KPI {kpi_id} does not exists")
+                return None
+            else:
+                value = 1 if flag else 0
+                c.execute("UPDATE kpi SET monitor_flag = ? WHERE kpi_id is ?",(value,kpi_id))
+                self.client.commit()
+                return True
+        except sqlite3.Error as e:
+            LOGGER.debug(f"KPI {kpi_id} cannot be checked from the ManagementDB: {e}")
\ No newline at end of file
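
The new monitor_flag column and its two helpers implement a simple "already being monitored" latch, which MonitorKpi uses further down to avoid re-arming device monitoring for the same KPI. A self-contained illustration against an in-memory SQLite table (the real code goes through ManagementDB and its error handling); note the explicit commit(), without which the UPDATE would not persist:

```python
# Monitor-flag latch illustrated on a throwaway in-memory database.
import sqlite3

db = sqlite3.connect(':memory:')
db.execute("CREATE TABLE kpi (kpi_id INTEGER PRIMARY KEY, monitor_flag INTEGER)")
db.execute("INSERT INTO kpi (kpi_id, monitor_flag) VALUES (1, 0)")
db.commit()

def check_monitoring_flag(kpi_id):
    row = db.execute("SELECT monitor_flag FROM kpi WHERE kpi_id is ?", (kpi_id,)).fetchone()
    return None if row is None else bool(row[0])

def set_monitoring_flag(kpi_id, flag):
    db.execute("UPDATE kpi SET monitor_flag = ? WHERE kpi_id is ?", (1 if flag else 0, kpi_id))
    db.commit()  # required: the UPDATE is lost otherwise

if not check_monitoring_flag(1):  # the same guard MonitorKpi applies below
    set_monitoring_flag(1, True)  # arm device monitoring exactly once
assert check_monitoring_flag(1) is True
```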
diff --git a/src/monitoring/service/MetricsDBTools.py b/src/monitoring/service/MetricsDBTools.py
index 16e6373f542656b4c172c8d619bf3f17ca5df404..1d3888d5348bdbe2995f077310ca448827290382 100644
--- a/src/monitoring/service/MetricsDBTools.py
+++ b/src/monitoring/service/MetricsDBTools.py
@@ -87,6 +87,8 @@ class MetricsDB():
                     'device_id SYMBOL,' \
                     'endpoint_id SYMBOL,' \
                     'service_id SYMBOL,' \
+                    'slice_id SYMBOL,' \
+                    'connection_id SYMBOL,' \
                     'timestamp TIMESTAMP,' \
                     'kpi_value DOUBLE)' \
                     'TIMESTAMP(timestamp);'
@@ -97,7 +99,7 @@ class MetricsDB():
             LOGGER.debug(f"Table {self.table} cannot be created. {e}")
             raise Exception
 
-    def write_KPI(self, time, kpi_id, kpi_sample_type, device_id, endpoint_id, service_id, kpi_value):
+    def write_KPI(self, time, kpi_id, kpi_sample_type, device_id, endpoint_id, service_id, slice_id, connection_id, kpi_value):
         counter = 0
         while (counter < self.retries):
             try:
@@ -109,7 +111,9 @@ class MetricsDB():
                             'kpi_sample_type': kpi_sample_type,
                             'device_id': device_id,
                             'endpoint_id': endpoint_id,
-                            'service_id': service_id},
+                            'service_id': service_id,
+                            'slice_id': slice_id,
+                            'connection_id': connection_id},
                         columns={
                             'kpi_value': kpi_value},
                         at=datetime.datetime.fromtimestamp(time))
@@ -170,11 +174,54 @@ class MetricsDB():
                 if connection:
                     connection.close()
 
+    def get_raw_kpi_list(self, kpi_id, monitoring_window_s, last_n_samples, start_timestamp, end_timestamp):
+        try:
+            query_root  = f"SELECT timestamp, kpi_value FROM {self.table} WHERE kpi_id = '{kpi_id}' "
+            query       = query_root
+            start_date  = float()
+            end_date    = float()
+            if last_n_samples:
+                query = query + f"ORDER BY timestamp DESC limit {last_n_samples}"
+            elif monitoring_window_s or start_timestamp or end_timestamp:
+                if start_timestamp and end_timestamp:
+                    start_date  = start_timestamp
+                    end_date    = end_timestamp
+                elif monitoring_window_s:
+                    if start_timestamp and not end_timestamp:
+                        start_date  = start_timestamp
+                        end_date    = start_date + monitoring_window_s
+                    elif end_timestamp and not start_timestamp:
+                        end_date    = end_timestamp
+                        start_date  = end_date - monitoring_window_s
+                    elif not start_timestamp and not end_timestamp:
+                        end_date    = timestamp_utcnow_to_float()
+                        start_date  = end_date - monitoring_window_s
+                query = query + f"AND (timestamp BETWEEN '{timestamp_float_to_string(start_date)}' AND '{timestamp_float_to_string(end_date)}')"
+            else:
+                LOGGER.debug("Wrong parameter settings for QueryKpiData")
+                return None
+
+            LOGGER.debug(query)
+
+            if self.postgre:
+                kpi_list = self.run_query_postgre(query)
+                LOGGER.debug(f"kpi_list postgre: {kpi_list}")
+            else:
+                kpi_list = self.run_query(query)
+                LOGGER.debug(f"kpi_list influx: {kpi_list}")
+            if kpi_list:
+                LOGGER.debug(f"New data received for subscription to KPI {kpi_id}")
+                return kpi_list
+            else:
+                LOGGER.debug(f"No new data for the subscription to KPI {kpi_id}")
+        except (Exception) as e:
+            LOGGER.debug(f"Subscription data cannot be retrieved. {e}")
+
     def get_subscription_data(self,subs_queue, kpi_id, sampling_interval_s=1):
         try:
             end_date = timestamp_utcnow_to_float() - self.commit_lag_ms / 1000
             start_date = end_date - sampling_interval_s
             query = f"SELECT kpi_id, timestamp, kpi_value FROM {self.table} WHERE kpi_id = '{kpi_id}' AND (timestamp BETWEEN '{timestamp_float_to_string(start_date)}' AND '{timestamp_float_to_string(end_date)}')"
+            LOGGER.debug(query)
             if self.postgre:
                 kpi_list = self.run_query_postgre(query)
                 LOGGER.debug(f"kpi_list postgre: {kpi_list}")
@@ -201,6 +248,8 @@ class MetricsDB():
                 kpi_list = self.run_query(query)
             if kpi_list:
                 LOGGER.debug(f"New data received for alarm of KPI {kpi_id}")
+                LOGGER.debug(kpi_list)
+                valid_kpi_list = []
                 for kpi in kpi_list:
                     alarm = False
                     kpi_value = kpi[2]
@@ -263,10 +312,10 @@ class MetricsDB():
                         if (kpi_value >= kpiMaxValue):
                             alarm = True
                     if alarm:
-                        # queue.append[kpi]
-                        alarm_queue.put_nowait(kpi)
-                        LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> kpi_value:{kpi[2]}, timestamp:{kpi[1]}")
-                else:
-                    LOGGER.debug(f"No new data for the alarm of KPI {kpi_id}")
+                        valid_kpi_list.append(kpi)
+                if valid_kpi_list:
+                    alarm_queue.put_nowait(valid_kpi_list)
+                    LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> {len(valid_kpi_list)} sample(s) in range")
+            else:
+                LOGGER.debug(f"No new data for the alarm of KPI {kpi_id}")
         except (Exception) as e:
             LOGGER.debug(f"Alarm data cannot be retrieved. {e}")
\ No newline at end of file
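
get_raw_kpi_list() resolves its query window from three optional inputs with a fixed precedence: explicit start and end, then a window anchored at whichever bound is present, then a window ending now. The branch logic is easier to verify as a pure function; a sketch under the same precedence rules, leaving the last_n_samples shortcut aside:

```python
# Pure-function restatement of the time-window resolution (for unit testing).
import time

def resolve_window(monitoring_window_s, start_timestamp, end_timestamp, now=None):
    now = now if now is not None else time.time()
    if start_timestamp and end_timestamp:
        return start_timestamp, end_timestamp
    if monitoring_window_s:
        if start_timestamp:
            return start_timestamp, start_timestamp + monitoring_window_s
        if end_timestamp:
            return end_timestamp - monitoring_window_s, end_timestamp
        return now - monitoring_window_s, now
    raise ValueError('need both timestamps or a monitoring window')

assert resolve_window(None, 100.0, 160.0) == (100.0, 160.0)
assert resolve_window(60, 100.0, None)    == (100.0, 160.0)
assert resolve_window(60, None, 160.0)    == (100.0, 160.0)
```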
diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py
index 7cd47f187986a0c32eea2ac8405183ac4418d100..548f34c8a07a1d8df17f2702879dbbadf60f6d13 100644
--- a/src/monitoring/service/MonitoringServiceServicerImpl.py
+++ b/src/monitoring/service/MonitoringServiceServicerImpl.py
@@ -26,9 +26,9 @@ from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.proto.monitoring_pb2_grpc import MonitoringServiceServicer
 from common.proto.monitoring_pb2 import AlarmResponse, AlarmDescriptor, AlarmList, SubsList, KpiId, \
     KpiDescriptor, KpiList, KpiQuery, SubsDescriptor, SubscriptionID, AlarmID, KpiDescriptorList, \
-    MonitorKpiRequest, Kpi, AlarmSubscription, SubsResponse
+    MonitorKpiRequest, Kpi, AlarmSubscription, SubsResponse, RawKpiTable, RawKpi, RawKpiList
 from common.rpc_method_wrapper.ServiceExceptions import ServiceException
-from common.tools.timestamp.Converters import timestamp_string_to_float
+from common.tools.timestamp.Converters import timestamp_string_to_float, timestamp_utcnow_to_float
 
 from monitoring.service import ManagementDBTools, MetricsDBTools
 from device.client.DeviceClient import DeviceClient
@@ -85,13 +85,16 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
             kpi_device_id = request.device_id.device_uuid.uuid
             kpi_endpoint_id = request.endpoint_id.endpoint_uuid.uuid
             kpi_service_id = request.service_id.service_uuid.uuid
+            kpi_slice_id = request.slice_id.slice_uuid.uuid
+            kpi_connection_id = request.connection_id.connection_uuid.uuid
 
-            if request.kpi_id.kpi_id.uuid is not "":
+            if request.kpi_id.kpi_id.uuid != "":
                 response.kpi_id.uuid = request.kpi_id.kpi_id.uuid
             #     Here the code to modify an existing kpi
             else:
                 data = self.management_db.insert_KPI(
-                    kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
+                    kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id, kpi_slice_id, kpi_connection_id)
                 response.kpi_id.uuid = str(data)
 
             return response
@@ -131,11 +134,13 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
             if kpi_db is None:
                 LOGGER.info('GetKpiDescriptor error: KpiID({:s}): not found in database'.format(str(kpi_id)))
             else:
-                kpiDescriptor.kpi_description = kpi_db[1]
-                kpiDescriptor.kpi_sample_type = kpi_db[2]
-                kpiDescriptor.device_id.device_uuid.uuid = str(kpi_db[3])
-                kpiDescriptor.endpoint_id.endpoint_uuid.uuid = str(kpi_db[4])
-                kpiDescriptor.service_id.service_uuid.uuid = str(kpi_db[5])
+                kpiDescriptor.kpi_description                       = kpi_db[1]
+                kpiDescriptor.kpi_sample_type                       = kpi_db[2]
+                kpiDescriptor.device_id.device_uuid.uuid            = str(kpi_db[3])
+                kpiDescriptor.endpoint_id.endpoint_uuid.uuid        = str(kpi_db[4])
+                kpiDescriptor.service_id.service_uuid.uuid          = str(kpi_db[5])
+                kpiDescriptor.slice_id.slice_uuid.uuid              = str(kpi_db[6])
+                kpiDescriptor.connection_id.connection_uuid.uuid    = str(kpi_db[7])
             return kpiDescriptor
         except ServiceException as e:
             LOGGER.exception('GetKpiDescriptor exception')
@@ -154,12 +159,14 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
 
             for item in data:
                 kpi_descriptor = KpiDescriptor()
-                kpi_descriptor.kpi_id.kpi_id.uuid = str(item[0])
-                kpi_descriptor.kpi_description = item[1]
-                kpi_descriptor.kpi_sample_type = item[2]
-                kpi_descriptor.device_id.device_uuid.uuid = str(item[3])
-                kpi_descriptor.endpoint_id.endpoint_uuid.uuid = str(item[4])
-                kpi_descriptor.service_id.service_uuid.uuid = str(item[5])
+                kpi_descriptor.kpi_id.kpi_id.uuid                   = str(item[0])
+                kpi_descriptor.kpi_description                      = item[1]
+                kpi_descriptor.kpi_sample_type                      = item[2]
+                kpi_descriptor.device_id.device_uuid.uuid           = str(item[3])
+                kpi_descriptor.endpoint_id.endpoint_uuid.uuid       = str(item[4])
+                kpi_descriptor.service_id.service_uuid.uuid         = str(item[5])
+                kpi_descriptor.slice_id.slice_uuid.uuid             = str(item[6])
+                kpi_descriptor.connection_id.connection_uuid.uuid   = str(item[7])
 
                 kpi_descriptor_list.kpi_descriptor_list.append(kpi_descriptor)
 
@@ -186,11 +193,13 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
                 deviceId = kpiDescriptor.device_id.device_uuid.uuid
                 endpointId = kpiDescriptor.endpoint_id.endpoint_uuid.uuid
                 serviceId = kpiDescriptor.service_id.service_uuid.uuid
+                sliceId   = kpiDescriptor.slice_id.slice_uuid.uuid
+                connectionId = kpiDescriptor.connection_id.connection_uuid.uuid
                 time_stamp = request.timestamp.timestamp
                 kpi_value = getattr(request.kpi_value, request.kpi_value.WhichOneof('value'))
 
                 # Build the structure to be included as point in the MetricsDB
-                self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, kpi_value)
+                self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, sliceId, connectionId, kpi_value)
 
             return Empty()
         except ServiceException as e:
@@ -220,8 +229,13 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
                 monitor_device_request.sampling_duration_s = request.monitoring_window_s
                 monitor_device_request.sampling_interval_s = request.sampling_rate_s
 
-                device_client = DeviceClient()
-                device_client.MonitorDeviceKpi(monitor_device_request)
+                if not self.management_db.check_monitoring_flag(kpi_id):
+                    device_client = DeviceClient()
+                    device_client.MonitorDeviceKpi(monitor_device_request)
+                    self.management_db.set_monitoring_flag(kpi_id,True)
+                else:
+                    LOGGER.warning('MonitorKpi warning: KpiID({:s}) is currently being monitored'.format(str(kpi_id)))
             else:
                 LOGGER.info('MonitorKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
             return response
@@ -234,12 +248,48 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
             grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
             # CREATEKPI_COUNTER_FAILED.inc()
 
-    def QueryKpiData(self, request: KpiQuery, grpc_context: grpc.ServicerContext) -> KpiList:
+    def QueryKpiData(self, request: KpiQuery, grpc_context: grpc.ServicerContext) -> RawKpiTable:
 
         LOGGER.info('QueryKpiData')
         try:
-            # TBC
-            return KpiList()
+            raw_kpi_table = RawKpiTable()
+
+            LOGGER.debug(str(request))
+
+            kpi_id_list             = request.kpi_ids
+            monitoring_window_s     = request.monitoring_window_s
+            last_n_samples          = request.last_n_samples
+            start_timestamp         = request.start_timestamp.timestamp
+            end_timestamp           = request.end_timestamp.timestamp
+
+            # Check that all the requested KPI IDs exist
+            for item in kpi_id_list:
+                kpi_id = item.kpi_id.uuid
+
+                kpiDescriptor = self.GetKpiDescriptor(item, grpc_context)
+                if kpiDescriptor is None:
+                    LOGGER.info('QueryKpiData error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+                    break
+                else:
+                    # Run the query for this KPI ID and add its point list to the table
+                    kpi_list = self.metrics_db.get_raw_kpi_list(kpi_id,monitoring_window_s,last_n_samples,start_timestamp,end_timestamp)
+                    raw_kpi_list = RawKpiList()
+                    raw_kpi_list.kpi_id.kpi_id.uuid = kpi_id
+
+                    LOGGER.debug(str(kpi_list))
+
+                    if kpi_list is None:
+                        LOGGER.info('QueryKpiData error: KpiID({:s}): points not found in metrics database'.format(str(kpi_id)))
+                    else:
+                        for point in kpi_list:
+                            raw_kpi = RawKpi()
+                            raw_kpi.timestamp.timestamp = timestamp_string_to_float(point[0])
+                            raw_kpi.kpi_value.floatVal  = point[1]
+                            raw_kpi_list.raw_kpis.append(raw_kpi)
+
+                    raw_kpi_table.raw_kpi_lists.append(raw_kpi_list)
+
+            return raw_kpi_table
         except ServiceException as e:
             LOGGER.exception('QueryKpiData exception')
             grpc_context.abort(e.code, e.details)
@@ -250,9 +300,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
 
         LOGGER.info('SubscribeKpi')
         try:
-
             subs_queue = Queue()
-            subs_response = SubsResponse()
 
             kpi_id = request.kpi_id.kpi_id.uuid
             sampling_duration_s = request.sampling_duration_s
@@ -268,18 +316,21 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
                                                   start_timestamp, end_timestamp)
 
             # parse queue to append kpis into the list
-            while not subs_queue.empty():
-                list = subs_queue.get_nowait()
-                for item in list:
-                    kpi = Kpi()
-                    kpi.kpi_id.kpi_id.uuid = str(item[0])
-                    kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
-                    kpi.kpi_value.floatVal = item[2]  # This must be improved
-                    subs_response.kpi_list.kpi.append(kpi)
-
-            subs_response.subs_id.subs_id.uuid = str(subs_id)
-
-            yield subs_response
+            while True:
+                while not subs_queue.empty():
+                    subs_response = SubsResponse()
+                    kpi_items = subs_queue.get_nowait()
+                    for item in kpi_items:
+                        kpi = Kpi()
+                        kpi.kpi_id.kpi_id.uuid = str(item[0])
+                        kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
+                        kpi.kpi_value.floatVal = item[2]  # This must be improved
+                        subs_response.kpi_list.kpi.append(kpi)
+                    subs_response.subs_id.subs_id.uuid = str(subs_id)
+                    yield subs_response
+                if timestamp_utcnow_to_float() > end_timestamp:
+                    break
         except ServiceException as e:
             LOGGER.exception('SubscribeKpi exception')
             grpc_context.abort(e.code, e.details)
@@ -373,7 +424,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
 
             LOGGER.debug(f"request.AlarmID: {request.alarm_id.alarm_id.uuid}")
 
-            if request.alarm_id.alarm_id.uuid is not "":
+            if request.alarm_id.alarm_id.uuid != "":
                 alarm_id = request.alarm_id.alarm_id.uuid
             #     Here the code to modify an existing alarm
             else:
@@ -424,6 +475,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         LOGGER.info('GetAlarmDescriptor')
         try:
             alarm_id = request.alarm_id.uuid
+            LOGGER.debug(alarm_id)
             alarm = self.management_db.get_alarm(alarm_id)
             response = AlarmDescriptor()
 
@@ -454,15 +506,13 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         LOGGER.info('GetAlarmResponseStream')
         try:
             alarm_id = request.alarm_id.alarm_id.uuid
-            alarm = self.management_db.get_alarm(alarm_id)
-            alarm_response = AlarmResponse()
-
-            if alarm:
+            alarm_data = self.management_db.get_alarm(alarm_id)
+            real_start_time = timestamp_utcnow_to_float()
 
+            if alarm_data:
+                LOGGER.debug(f"{alarm_data}")
                 alarm_queue = Queue()
 
-                alarm_data = self.management_db.get_alarm(alarm)
-
                 alarm_id = request.alarm_id.alarm_id.uuid
                 kpi_id = alarm_data[3]
                 kpiMinValue = alarm_data[4]
@@ -473,24 +523,30 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
                 subscription_frequency_ms = request.subscription_frequency_ms
                 subscription_timeout_s = request.subscription_timeout_s
 
+                end_timestamp = real_start_time + subscription_timeout_s
+
                 self.alarm_manager.create_alarm(alarm_queue, alarm_id, kpi_id, kpiMinValue, kpiMaxValue, inRange,
                                                 includeMinValue, includeMaxValue, subscription_frequency_ms,
                                                 subscription_timeout_s)
 
-                while not alarm_queue.empty():
-                    list = alarm_queue.get_nowait()
-                    for item in list:
-                        kpi = Kpi()
-                        kpi.kpi_id.kpi_id.uuid = str(item[0])
-                        kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
-                        kpi.kpi_value.floatVal = item[2]  # This must be improved
-                        alarm_response.kpi_list.kpi.append(kpi)
-
-                alarm_response.alarm_id.alarm_id.uuid = alarm_id
-
-                yield alarm_response
+                while True:
+                    while not alarm_queue.empty():
+                        alarm_response = AlarmResponse()
+                        kpi_items = alarm_queue.get_nowait()
+                        for item in kpi_items:
+                            kpi = Kpi()
+                            kpi.kpi_id.kpi_id.uuid = str(item[0])
+                            kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
+                            kpi.kpi_value.floatVal = item[2]  # This must be improved
+                            alarm_response.kpi_list.kpi.append(kpi)
+                        alarm_response.alarm_id.alarm_id.uuid = alarm_id
+                        yield alarm_response
+                    if timestamp_utcnow_to_float() > end_timestamp:
+                        break
             else:
                 LOGGER.info('GetAlarmResponseStream error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
+                alarm_response = AlarmResponse()
                 alarm_response.alarm_id.alarm_id.uuid = "NoID"
                 return alarm_response
         except ServiceException as e:
@@ -527,7 +583,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         kpi_db = self.management_db.get_KPI(int(kpi_id))
         response = Kpi()
         if kpi_db is None:
-            LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+            LOGGER.info('GetStreamKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
             response.kpi_id.kpi_id.uuid = "NoID"
             return response
         else:
@@ -540,26 +596,29 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         try:
             kpi_id = request.kpi_id.uuid
             response = Kpi()
-            if kpi_id is "":
+            if kpi_id == "":
                 LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
                 response.kpi_id.kpi_id.uuid = "NoID"
             else:
                 query = f"SELECT kpi_id, timestamp, kpi_value FROM monitoring WHERE kpi_id = '{kpi_id}' " \
                         f"LATEST ON timestamp PARTITION BY kpi_id"
-                data = self.metrics_db.run_query(query)[0]
+                data = self.metrics_db.run_query(query)
                 LOGGER.debug(data)
-
-                response.kpi_id.kpi_id.uuid = str(data[0])
-                response.timestamp.timestamp = timestamp_string_to_float(data[1])
-                response.kpi_value.floatVal = data[2]  # This must be improved
+                if len(data) == 0:
+                    response.kpi_id.kpi_id.uuid = request.kpi_id.uuid
+                else:
+                    _data = data[0]
+                    response.kpi_id.kpi_id.uuid = str(_data[0])
+                    response.timestamp.timestamp = timestamp_string_to_float(_data[1])
+                    response.kpi_value.floatVal = _data[2]
 
             return response
         except ServiceException as e:
-            LOGGER.exception('SetKpi exception')
+            LOGGER.exception('GetInstantKpi exception')
             # CREATEKPI_COUNTER_FAILED.inc()
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
-            LOGGER.exception('SetKpi exception')
+            LOGGER.exception('GetInstantKpi exception')
             # CREATEKPI_COUNTER_FAILED.inc()
             grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
 
diff --git a/src/monitoring/service/SubscriptionManager.py b/src/monitoring/service/SubscriptionManager.py
index fe27d6ee365676b05175b762a106621121e3b897..3d1da36b7c5f66c28d3885a305660d6971f695b1 100644
--- a/src/monitoring/service/SubscriptionManager.py
+++ b/src/monitoring/service/SubscriptionManager.py
@@ -42,14 +42,12 @@ class SubscriptionManager():
         if end_timestamp:
             end_date = datetime.utcfromtimestamp(end_timestamp).isoformat()
 
-        LOGGER.debug(f"kpi_id: {kpi_id}")
-        LOGGER.debug(f"sampling_interval_s: {sampling_interval_s}")
-        LOGGER.debug(f"subscription_id: {subscription_id}")
-        LOGGER.debug(f"start_date: {start_date}")
-        self.scheduler.add_job(self.metrics_db.get_subscription_data, args=(subs_queue,kpi_id, sampling_interval_s),
+        self.scheduler.add_job(self.metrics_db.get_subscription_data, args=(subs_queue, kpi_id, sampling_interval_s),
                                trigger='interval', seconds=sampling_interval_s, start_date=start_date,
                                end_date=end_date, timezone=pytz.utc, id=str(subscription_id))
         LOGGER.debug(f"Subscrition job {subscription_id} succesfully created")
 
     def delete_subscription(self, subscription_id):
-        self.scheduler.remove_job(subscription_id)
\ No newline at end of file
+        self.scheduler.remove_job(subscription_id)
+        LOGGER.debug(f"Subscription job {subscription_id} succesfully deleted")
\ No newline at end of file
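
One caveat for delete_subscription(): APScheduler drops an interval job on its own once its end_date passes, and remove_job() raises JobLookupError for an id it no longer knows (the exception AlarmManager already imports above). A tolerant wrapper, assuming the same scheduler object:

```python
# Deletion that tolerates jobs already expired and removed by the scheduler.
from apscheduler.jobstores.base import JobLookupError

def delete_subscription_safe(scheduler, subscription_id):
    try:
        scheduler.remove_job(str(subscription_id))
    except JobLookupError:
        pass  # the job reached its end_date and was already removed
```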
diff --git a/src/monitoring/tests/Messages.py b/src/monitoring/tests/Messages.py
index 845153856c44cec0576bd6f11b045e3310558a97..f15cb5ec2c1d14ed95731cd37e54cb714b29e8b7 100644
--- a/src/monitoring/tests/Messages.py
+++ b/src/monitoring/tests/Messages.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import datetime
 from random import random
 
 from common.proto import monitoring_pb2
@@ -23,13 +22,15 @@ def kpi_id():
     _kpi_id.kpi_id.uuid = str(1)            # pylint: disable=maybe-no-member
     return _kpi_id
 
-def create_kpi_request():
-    _create_kpi_request                                = monitoring_pb2.KpiDescriptor()
-    _create_kpi_request.kpi_description                = 'KPI Description Test'
-    _create_kpi_request.kpi_sample_type                = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED
-    _create_kpi_request.device_id.device_uuid.uuid     = 'DEV1'     # pylint: disable=maybe-no-member
-    _create_kpi_request.service_id.service_uuid.uuid   = 'SERV1'    # pylint: disable=maybe-no-member
-    _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END1'     # pylint: disable=maybe-no-member
+def create_kpi_request(kpi_id_str):
+    _create_kpi_request                                     = monitoring_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_description                     = 'KPI Description Test'
+    _create_kpi_request.kpi_sample_type                     = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    _create_kpi_request.device_id.device_uuid.uuid          = 'DEV' + str(kpi_id_str)
+    _create_kpi_request.service_id.service_uuid.uuid        = 'SERV' + str(kpi_id_str)
+    _create_kpi_request.slice_id.slice_uuid.uuid            = 'SLC' + str(kpi_id_str)
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid      = 'END' + str(kpi_id_str)
+    _create_kpi_request.connection_id.connection_uuid.uuid  = 'CON' + str(kpi_id_str)
     return _create_kpi_request
 
 def create_kpi_request_b():
@@ -38,7 +39,9 @@ def create_kpi_request_b():
     _create_kpi_request.kpi_sample_type                = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
     _create_kpi_request.device_id.device_uuid.uuid     = 'DEV2'     # pylint: disable=maybe-no-member
     _create_kpi_request.service_id.service_uuid.uuid   = 'SERV2'    # pylint: disable=maybe-no-member
+    _create_kpi_request.slice_id.slice_uuid.uuid       = 'SLC2'  # pylint: disable=maybe-no-member
     _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END2'     # pylint: disable=maybe-no-member
+    _create_kpi_request.connection_id.connection_uuid.uuid = 'CON2'  # pylint: disable=maybe-no-member
     return _create_kpi_request
 
 def create_kpi_request_c():
@@ -47,7 +50,9 @@ def create_kpi_request_c():
     _create_kpi_request.kpi_sample_type                = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
     _create_kpi_request.device_id.device_uuid.uuid     = 'DEV3'     # pylint: disable=maybe-no-member
     _create_kpi_request.service_id.service_uuid.uuid   = 'SERV3'    # pylint: disable=maybe-no-member
+    _create_kpi_request.slice_id.slice_uuid.uuid       = 'SLC3'  # pylint: disable=maybe-no-member
     _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END3'     # pylint: disable=maybe-no-member
+    _create_kpi_request.connection_id.connection_uuid.uuid = 'CON3'  # pylint: disable=maybe-no-member
     return _create_kpi_request
 
 def monitor_kpi_request(kpi_uuid, monitoring_window_s, sampling_rate_s):
@@ -69,20 +74,32 @@ def kpi_descriptor_list():
 
     return _kpi_descriptor_list
 
-def kpi_query():
+def kpi_query(kpi_id_list):
     _kpi_query = monitoring_pb2.KpiQuery()
 
+    _kpi_query.kpi_ids.extend(kpi_id_list)
+    # _kpi_query.monitoring_window_s          = 10
+    # _kpi_query.last_n_samples               = 2
+    _kpi_query.start_timestamp.timestamp    = timestamp_utcnow_to_float() - 10
+    _kpi_query.end_timestamp.timestamp      = timestamp_utcnow_to_float()
+
     return _kpi_query
 
 def subs_descriptor(kpi_id):
     _subs_descriptor = monitoring_pb2.SubsDescriptor()
 
+    sampling_duration_s = 10
+    sampling_interval_s = 3
+    real_start_time     = timestamp_utcnow_to_float()
+    start_timestamp     = real_start_time
+    end_timestamp       = start_timestamp + sampling_duration_s
+
     _subs_descriptor.subs_id.subs_id.uuid       = ""
     _subs_descriptor.kpi_id.kpi_id.uuid         = kpi_id.kpi_id.uuid
-    _subs_descriptor.sampling_duration_s        = 10
-    _subs_descriptor.sampling_interval_s        = 2
-    _subs_descriptor.start_timestamp.timestamp  = timestamp_utcnow_to_float()
-    _subs_descriptor.end_timestamp.timestamp    = timestamp_utcnow_to_float() + 10
+    _subs_descriptor.sampling_duration_s        = sampling_duration_s
+    _subs_descriptor.sampling_interval_s        = sampling_interval_s
+    _subs_descriptor.start_timestamp.timestamp  = start_timestamp
+    _subs_descriptor.end_timestamp.timestamp    = end_timestamp
 
     return _subs_descriptor
 
@@ -91,14 +108,14 @@ def subs_id():
 
     return _subs_id
 
-def alarm_descriptor():
+def alarm_descriptor(kpi_id):
     _alarm_descriptor = monitoring_pb2.AlarmDescriptor()
 
     _alarm_descriptor.alarm_description                     = "Alarm Description"
     _alarm_descriptor.name                                  = "Alarm Name"
-    _alarm_descriptor.kpi_id.kpi_id.uuid                    = "1"
+    _alarm_descriptor.kpi_id.kpi_id.uuid                    = kpi_id.kpi_id.uuid
     _alarm_descriptor.kpi_value_range.kpiMinValue.floatVal  = 0.0
-    _alarm_descriptor.kpi_value_range.kpiMaxValue.floatVal  = 50.0
+    _alarm_descriptor.kpi_value_range.kpiMaxValue.floatVal  = 250.0
     _alarm_descriptor.kpi_value_range.inRange               = True
     _alarm_descriptor.kpi_value_range.includeMinValue       = False
     _alarm_descriptor.kpi_value_range.includeMaxValue       = True
@@ -113,11 +130,16 @@ def alarm_descriptor_b():
     return _alarm_descriptor
 
 def alarm_subscription(alarm_id):
-    _alarm_descriptor = monitoring_pb2.AlarmSubscription()
+    _alarm_subscription = monitoring_pb2.AlarmSubscription()
 
-    _alarm_descriptor.alarm_id.alarm_id.uuid = str(alarm_id)
+    subscription_timeout_s = 10
+    subscription_frequency_ms = 1000
 
-    return _alarm_descriptor
+    _alarm_subscription.alarm_id.alarm_id.uuid      = str(alarm_id.alarm_id.uuid)
+    _alarm_subscription.subscription_timeout_s      = subscription_timeout_s
+    _alarm_subscription.subscription_frequency_ms   = subscription_frequency_ms
+
+    return _alarm_subscription
 
 
 def alarm_id():
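
For orientation, the reworked helpers compose as follows in the tests below: SetKpi() returns a KpiId, a list of which feeds kpi_query(), whose window defaults to the last 10 seconds. A fragment assuming a connected monitoring_client, as provided by the pytest fixtures in test_unitary.py:

```python
# Hypothetical composition of the helpers above inside the test harness.
from monitoring.tests.Messages import create_kpi_request, kpi_query

def query_two_kpis(monitoring_client):
    kpi_ids = [monitoring_client.SetKpi(create_kpi_request(str(i + 1))) for i in range(2)]
    query = kpi_query(kpi_ids)                    # start/end default to last 10 s
    return monitoring_client.QueryKpiData(query)  # RawKpiTable with two lists
```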
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index ee6a29e8a483fe53c58a6e6d2e3aa240f2456b81..b113f5a7822841e17274300dc7102664bce1c409 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -15,11 +15,14 @@
 import copy, os, pytest
 import threading
 import time
+from queue import Queue
+from random import random
 from time import sleep
 from typing import Tuple
 
 from apscheduler.executors.pool import ProcessPoolExecutor
 from apscheduler.schedulers.background import BackgroundScheduler
+from apscheduler.schedulers.base import STATE_STOPPED
 from grpc._channel import _MultiThreadedRendezvous
 
 from common.Constants import ServiceNameEnum
@@ -33,7 +36,8 @@ from common.message_broker.MessageBroker import MessageBroker
 from common.proto import monitoring_pb2
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.proto.monitoring_pb2 import KpiId, KpiDescriptor, KpiList, SubsDescriptor, SubsList, AlarmID, \
-    AlarmDescriptor, AlarmList, Kpi, KpiDescriptorList, SubsResponse, AlarmResponse
+    AlarmDescriptor, AlarmList, Kpi, KpiDescriptorList, SubsResponse, AlarmResponse, RawKpiTable
+from common.tools.timestamp.Converters import timestamp_utcnow_to_float, timestamp_string_to_float
 
 from context.client.ContextClient import ContextClient
 from context.service.grpc_server.ContextService import ContextService
@@ -43,6 +47,9 @@ from device.client.DeviceClient import DeviceClient
 from device.service.DeviceService import DeviceService
 from device.service.driver_api.DriverFactory import DriverFactory
 from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
+from monitoring.service.AlarmManager import AlarmManager
+from monitoring.service.MetricsDBTools import MetricsDB
+from monitoring.service.SubscriptionManager import SubscriptionManager
 
 os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
 from device.service.drivers import DRIVERS  # pylint: disable=wrong-import-position
@@ -175,14 +182,23 @@ def subs_scheduler():
 
     return _scheduler
 
-def ingestion_data(monitoring_client):
-    _kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
-    _include_kpi_request = include_kpi_request(_kpi_id)
+def ingestion_data(kpi_id_int):
+    metrics_db = MetricsDB("localhost", "9009", "9000", "monitoring")
+
+    for i in range(50):
+        kpiSampleType   = KpiSampleType.Name(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED).upper().replace('KPISAMPLETYPE_', '')
+        kpiId           = kpi_id_int
+        deviceId        = 'DEV'+ str(kpi_id_int)
+        endpointId      = 'END' + str(kpi_id_int)
+        serviceId       = 'SERV' + str(kpi_id_int)
+        sliceId         = 'SLC' + str(kpi_id_int)
+        connectionId    = 'CON' + str(kpi_id_int)
+        time_stamp      = timestamp_utcnow_to_float()
+        kpi_value       = 500*random()
 
-    for i in range(200):
-        _include_kpi_request = include_kpi_request(_kpi_id)
-        monitoring_client.IncludeKpi(_include_kpi_request)
-        time.sleep(0.01)
+        metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, sliceId, connectionId,
+                                  kpi_value)
+        sleep(0.1)
 
 ###########################
 # Tests Implementation
@@ -192,18 +208,17 @@ def ingestion_data(monitoring_client):
 def test_set_kpi(monitoring_client): # pylint: disable=redefined-outer-name
     # make call to server
     LOGGER.warning('test_create_kpi requesting')
-    response = monitoring_client.SetKpi(create_kpi_request())
-    LOGGER.debug(str(response))
-    response = monitoring_client.SetKpi(create_kpi_request_b())
-    LOGGER.debug(str(response))
-    assert isinstance(response, KpiId)
+    for i in range(3):
+        response = monitoring_client.SetKpi(create_kpi_request(str(i+1)))
+        LOGGER.debug(str(response))
+        assert isinstance(response, KpiId)
 
 
 # Test case that makes use of client fixture to test server's DeleteKpi method
 def test_delete_kpi(monitoring_client): # pylint: disable=redefined-outer-name
     # make call to server
     LOGGER.warning('delete_kpi requesting')
-    response = monitoring_client.SetKpi(create_kpi_request_b())
+    response = monitoring_client.SetKpi(create_kpi_request('4'))
     response = monitoring_client.DeleteKpi(response)
     LOGGER.debug(str(response))
     assert isinstance(response, Empty)
@@ -211,7 +226,7 @@ def test_delete_kpi(monitoring_client): # pylint: disable=redefined-outer-name
 # Test case that makes use of client fixture to test server's GetKpiDescriptor method
 def test_get_kpidescritor(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_getkpidescritor_kpi begin')
-    response = monitoring_client.SetKpi(create_kpi_request_c())
+    response = monitoring_client.SetKpi(create_kpi_request('1'))
     response = monitoring_client.GetKpiDescriptor(response)
     LOGGER.debug(str(response))
     assert isinstance(response, KpiDescriptor)
@@ -227,7 +242,8 @@ def test_get_kpi_descriptor_list(monitoring_client): # pylint: disable=redefined
 def test_include_kpi(monitoring_client): # pylint: disable=redefined-outer-name
     # make call to server
     LOGGER.warning('test_include_kpi requesting')
-    kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
+    kpi_id = monitoring_client.SetKpi(create_kpi_request('1'))
+    LOGGER.debug(str(kpi_id))
     response = monitoring_client.IncludeKpi(include_kpi_request(kpi_id))
     LOGGER.debug(str(response))
     assert isinstance(response, Empty)
@@ -261,44 +277,40 @@ def test_monitor_kpi(
     response = device_client.AddDevice(Device(**device_with_connect_rules))
     assert response.device_uuid.uuid == DEVICE_DEV1_UUID
 
-    response = monitoring_client.SetKpi(create_kpi_request())
+    response = monitoring_client.SetKpi(create_kpi_request('1'))
     _monitor_kpi_request = monitor_kpi_request(response.kpi_id.uuid, 120, 5) # pylint: disable=maybe-no-member
     response = monitoring_client.MonitorKpi(_monitor_kpi_request)
     LOGGER.debug(str(response))
     assert isinstance(response, Empty)
 
 # Test case that makes use of client fixture to test server's QueryKpiData method
-def test_query_kpi_data(monitoring_client): # pylint: disable=redefined-outer-name
+def test_query_kpi_data(monitoring_client,subs_scheduler): # pylint: disable=redefined-outer-name
+
+    kpi_id_list = []
+    for i in range(2):
+        kpi_id = monitoring_client.SetKpi(create_kpi_request(str(i+1)))
+        subs_scheduler.add_job(ingestion_data, args=[kpi_id.kpi_id.uuid])
+        kpi_id_list.append(kpi_id)
     LOGGER.warning('test_query_kpi_data')
-    response = monitoring_client.QueryKpiData(kpi_query())
+    sleep(5)
+    response = monitoring_client.QueryKpiData(kpi_query(kpi_id_list))
     LOGGER.debug(str(response))
-    assert isinstance(response, KpiList)
-
-def test_ingestion_data(monitoring_client):
-    _kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
-    _include_kpi_request = include_kpi_request(_kpi_id)
-
-    for i in range(100):
-        _include_kpi_request = include_kpi_request(_kpi_id)
-        monitoring_client.IncludeKpi(_include_kpi_request)
-        time.sleep(0.01)
-
-# def test_subscription_scheduler(monitoring_client,metrics_db,subs_scheduler):
-#     subs_scheduler.add_job(ingestion_data(monitoring_client),id="1")
+    assert isinstance(response, RawKpiTable)
+    if subs_scheduler.state != STATE_STOPPED:
+        subs_scheduler.shutdown()
 
 # Test case that makes use of client fixture to test server's SetKpiSubscription method
-def test_set_kpi_subscription(monitoring_client,metrics_db): # pylint: disable=redefined-outer-name
+def test_set_kpi_subscription(monitoring_client,subs_scheduler): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_set_kpi_subscription')
-    kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
-    # thread = threading.Thread(target=test_ingestion_data, args=(monitoring_client,metrics_db))
-    # thread.start()
-    monitoring_client.IncludeKpi(include_kpi_request(kpi_id))
+    kpi_id = monitoring_client.SetKpi(create_kpi_request('1'))
+    subs_scheduler.add_job(ingestion_data, args=[kpi_id.kpi_id.uuid])
     response = monitoring_client.SetKpiSubscription(subs_descriptor(kpi_id))
     assert isinstance(response, _MultiThreadedRendezvous)
-    LOGGER.debug(response)
     for item in response:
         LOGGER.debug(item)
         assert isinstance(item, SubsResponse)
+    if subs_scheduler.state != STATE_STOPPED:
+        subs_scheduler.shutdown()
 
 # Test case that makes use of client fixture to test server's GetSubsDescriptor method
 def test_get_subs_descriptor(monitoring_client):
@@ -331,7 +343,8 @@ def test_delete_subscription(monitoring_client):
 # Test case that makes use of client fixture to test server's SetKpiAlarm method
 def test_set_kpi_alarm(monitoring_client):
     LOGGER.warning('test_set_kpi_alarm')
-    response = monitoring_client.SetKpiAlarm(alarm_descriptor())
+    kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
+    response = monitoring_client.SetKpiAlarm(alarm_descriptor(kpi_id))
     LOGGER.debug(str(response))
     assert isinstance(response, AlarmID)
 
@@ -345,28 +358,35 @@ def test_get_alarms(monitoring_client):
 # Test case that makes use of client fixture to test server's GetAlarmDescriptor method
 def test_get_alarm_descriptor(monitoring_client):
     LOGGER.warning('test_get_alarm_descriptor')
-    alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor())
-    response = monitoring_client.GetAlarmDescriptor(alarm_id)
-    LOGGER.debug(response)
-    assert isinstance(response, AlarmDescriptor)
+    _kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
+    _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id))
+    _response = monitoring_client.GetAlarmDescriptor(_alarm_id)
+    LOGGER.debug(_response)
+    assert isinstance(_response, AlarmDescriptor)
 
 # Test case that makes use of client fixture to test server's GetAlarmResponseStream method
-def test_get_alarm_response_stream(monitoring_client):
+def test_get_alarm_response_stream(monitoring_client,subs_scheduler):
     LOGGER.warning('test_get_alarm_descriptor')
-    alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor())
-    response = monitoring_client.GetAlarmResponseStream(alarm_subscription(alarm_id))
-    assert isinstance(response, _MultiThreadedRendezvous)
-    for item in response:
-        LOGGER.debug(response)
+    _kpi_id = monitoring_client.SetKpi(create_kpi_request('3'))
+    _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id))
+    subs_scheduler.add_job(ingestion_data,args=[_kpi_id.kpi_id.uuid])
+    _response = monitoring_client.GetAlarmResponseStream(alarm_subscription(_alarm_id))
+    assert isinstance(_response, _MultiThreadedRendezvous)
+    for item in _response:
+        LOGGER.debug(item)
         assert isinstance(item,AlarmResponse)
 
+    if subs_scheduler.state != STATE_STOPPED:
+        subs_scheduler.shutdown()
+
 # Test case that makes use of client fixture to test server's DeleteAlarm method
 def test_delete_alarm(monitoring_client):
     LOGGER.warning('test_delete_alarm')
-    alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor())
-    response = monitoring_client.DeleteAlarm(alarm_id)
-    LOGGER.debug(type(response))
-    assert isinstance(response, Empty)
+    _kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
+    _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id))
+    _response = monitoring_client.DeleteAlarm(_alarm_id)
+    LOGGER.debug(type(_response))
+    assert isinstance(_response, Empty)
 
 # Test case that makes use of client fixture to test server's GetStreamKpi method
 def test_get_stream_kpi(monitoring_client): # pylint: disable=redefined-outer-name
@@ -384,64 +404,117 @@ def test_get_stream_kpi(monitoring_client): # pylint: disable=redefined-outer-na
 #     response = monitoring_client.GetInstantKpi(kpi_id)
 #     LOGGER.debug(response)
 #     assert isinstance(response, Kpi)
-    # response = monitoring_client.GetInstantKpi(KpiId())
-    # LOGGER.debug(type(response))
-    # assert response.kpi_id.kpi_id.uuid == "NoID"
-def test_managementdb_tools_insert_kpi(management_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_managementdb_tools_insert_kpi begin')
-    _create_kpi_request = create_kpi_request()
-    kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
-    kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
-    kpi_device_id   = _create_kpi_request.device_id.device_uuid.uuid     # pylint: disable=maybe-no-member
-    kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
-    kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
-
-    response = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
-    assert isinstance(response, int)
 
-def test_managementdb_tools_get_kpi(management_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_managementdb_tools_get_kpi begin')
-    _create_kpi_request = create_kpi_request()
+def test_managementdb_tools_kpis(management_db): # pylint: disable=redefined-outer-name
+    LOGGER.warning('test_managementdb_tools_kpis begin')
+    _create_kpi_request = create_kpi_request('5')
     kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
     kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
     kpi_device_id   = _create_kpi_request.device_id.device_uuid.uuid     # pylint: disable=maybe-no-member
     kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
     kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
+    kpi_slice_id      = _create_kpi_request.slice_id.slice_uuid.uuid           # pylint: disable=maybe-no-member
+    kpi_connection_id = _create_kpi_request.connection_id.connection_uuid.uuid # pylint: disable=maybe-no-member
+
+    _kpi_id = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id, kpi_slice_id, kpi_connection_id)
+    assert isinstance(_kpi_id, int)
 
-    _kpi_id = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
     response = management_db.get_KPI(_kpi_id)
     assert isinstance(response, tuple)
 
-def test_managementdb_tools_get_kpis(management_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_managementdb_tools_get_kpis begin')
+    response = management_db.set_monitoring_flag(_kpi_id,True)
+    assert response is True
+    response = management_db.check_monitoring_flag(_kpi_id)
+    assert response is True
+    management_db.set_monitoring_flag(_kpi_id, False)
+    response = management_db.check_monitoring_flag(_kpi_id)
+    assert response is False
+
     response = management_db.get_KPIS()
     assert isinstance(response, list)
 
-def test_managementdb_tools_delete_kpi(management_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_managementdb_tools_get_kpi begin')
-
-    _create_kpi_request = create_kpi_request()
-    kpi_description = _create_kpi_request.kpi_description  # pylint: disable=maybe-no-member
-    kpi_sample_type = _create_kpi_request.kpi_sample_type  # pylint: disable=maybe-no-member
-    kpi_device_id = _create_kpi_request.device_id.device_uuid.uuid  # pylint: disable=maybe-no-member
-    kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid  # pylint: disable=maybe-no-member
-    kpi_service_id = _create_kpi_request.service_id.service_uuid.uuid  # pylint: disable=maybe-no-member
-
-    _kpi_id = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id,
-                                        kpi_service_id)
-
     response = management_db.delete_KPI(_kpi_id)
-
     assert response
 
-def test_metrics_db_tools_write_kpi(metrics_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_metric_sdb_tools_write_kpi begin')
-
 
-def test_metrics_db_tools_read_kpi_points(metrics_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_metrics_db_tools_read_kpi_points begin')
+def test_managementdb_tools_insert_alarm(management_db):
+    LOGGER.warning('test_managementdb_tools_insert_alarm begin')
 
+    _alarm_description  = "Alarm Description"
+    _alarm_name         = "Alarm Name"
+    _kpi_id             = "3"
+    _kpi_min_value      = 0.0
+    _kpi_max_value      = 250.0
+    _in_range           = True
+    _include_min_value  = False
+    _include_max_value  = True
 
+    _alarm_id = management_db.insert_alarm(_alarm_description, _alarm_name, _kpi_id, _kpi_min_value,
+                                               _kpi_max_value,
+                                               _in_range, _include_min_value, _include_max_value)
+    LOGGER.debug(_alarm_id)
+    assert isinstance(_alarm_id,int)
+#
+# def test_metrics_db_tools(metrics_db): # pylint: disable=redefined-outer-name
+#     LOGGER.warning('test_metric_sdb_tools_write_kpi begin')
+#     _kpiId = "6"
+#
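+#     # Write 50 random samples (~50 ms apart), then verify they can be queried back from the metrics DB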
+#     for i in range(50):
+#         _kpiSampleType = KpiSampleType.Name(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED).upper().replace('KPISAMPLETYPE_', '')
+#         _deviceId = 'DEV4'
+#         _endpointId = 'END4'
+#         _serviceId = 'SERV4'
+#         _sliceId = 'SLC4'
+#         _connectionId = 'CON4'
+#         _time_stamp = timestamp_utcnow_to_float()
+#         _kpi_value = 500*random()
+#
+#         metrics_db.write_KPI(_time_stamp, _kpiId, _kpiSampleType, _deviceId, _endpointId, _serviceId, _sliceId, _connectionId,
+#                                   _kpi_value)
+#         sleep(0.05)
+#
+#     _query = f"SELECT * FROM monitoring WHERE kpi_id ='{_kpiId}'"
+#     _data = metrics_db.run_query(_query)
+#     assert len(_data) >= 50
+#
+# def test_subscription_manager_create_subscription(management_db,metrics_db,subs_scheduler):
+#     LOGGER.warning('test_subscription_manager_create_subscription begin')
+#     subs_queue = Queue()
+#
+#     subs_manager = SubscriptionManager(metrics_db)
+#
+#     subs_scheduler.add_job(ingestion_data)
+#
+#     kpi_id = "3"
+#     sampling_duration_s = 20
+#     sampling_interval_s = 3
+#     real_start_time     = timestamp_utcnow_to_float()
+#     start_timestamp     = real_start_time
+#     end_timestamp       = start_timestamp + sampling_duration_s
+#
+#     subs_id = management_db.insert_subscription(kpi_id, "localhost", sampling_duration_s,
+#                                                sampling_interval_s,start_timestamp,end_timestamp)
+#     subs_manager.create_subscription(subs_queue,subs_id,kpi_id,sampling_interval_s,
+#                                      sampling_duration_s,start_timestamp,end_timestamp)
+#
+#     # This is here to simulate application activity (which keeps the main thread alive).
+#     total_points = 0
+#     while True:
+#         while not subs_queue.empty():
+#             items = subs_queue.get_nowait()
+#             kpi_list = KpiList()
+#             for item in items:
+#                 kpi = Kpi()
+#                 kpi.kpi_id.kpi_id.uuid = item[0]
+#                 kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
+#                 kpi.kpi_value.floatVal = item[2]
+#                 kpi_list.kpi.append(kpi)
+#                 total_points += 1
+#             LOGGER.debug(kpi_list)
+#         if timestamp_utcnow_to_float() > end_timestamp:
+#             break
+#
+#     assert total_points != 0
 
 def test_events_tools(
         context_client : ContextClient,                 # pylint: disable=redefined-outer-name
diff --git a/src/common/database/api/context/slice/SliceStatus.py b/src/slice/old_code/SliceStatus.py
similarity index 100%
rename from src/common/database/api/context/slice/SliceStatus.py
rename to src/slice/old_code/SliceStatus.py
diff --git a/src/slice/old_code/Tools.py b/src/slice/old_code/Tools.py
index 4ea7900489f27588399e2eb94b6a5576d8b08fd0..08323f935195d8a0221b3f8889c0e6beeef94cb2 100644
--- a/src/slice/old_code/Tools.py
+++ b/src/slice/old_code/Tools.py
@@ -18,7 +18,7 @@ from common.Checkers import chk_options, chk_string
 from common.database.api.Database import Database
 from common.database.api.context.Constants import DEFAULT_CONTEXT_ID, DEFAULT_TOPOLOGY_ID
 from common.database.api.context.service.Service import Service
-from common.database.api.context.slice.SliceStatus import SliceStatus, slicestatus_enum_values, to_slicestatus_enum
+from slice.old_code.SliceStatus import SliceStatus, slicestatus_enum_values, to_slicestatus_enum
 from common.database.api.context.topology.device.Endpoint import Endpoint
 from common.exceptions.ServiceException import ServiceException
 from common.proto.slice_pb2 import TransportSlice
diff --git a/src/tests/Fixtures.py b/src/tests/Fixtures.py
index aeead8448651b386f4c69d12c139b6043fe5ef55..25b73e1de143b8c60d9a726ddf2bd3cea97d17a5 100644
--- a/src/tests/Fixtures.py
+++ b/src/tests/Fixtures.py
@@ -13,8 +13,6 @@
 # limitations under the License.
 
 import pytest
-from common.Settings import get_setting
-from compute.tests.mock_osm.MockOSM import MockOSM
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from monitoring.client.MonitoringClient import MonitoringClient
diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh
index 8afd683843d4882e75c3cbca8363aa3d63edda7f..ffd91da35186fe21f418950493ef797a9af1b522 100644
--- a/src/tests/ofc22/deploy_specs.sh
+++ b/src/tests/ofc22/deploy_specs.sh
@@ -2,6 +2,11 @@
 export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
+# Supported components are:
+#   context device automation policy service compute monitoring webui
+#   interdomain slice pathcomp dlt
+#   dbscanserving opticalattackmitigator opticalattackdetector
+#   l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
 export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
 
 # Set the tag you want to use for your images.
@@ -13,5 +18,9 @@ export TFS_K8S_NAMESPACE="tfs"
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 
-# Set the neew Grafana admin password
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
+
+# If not already set, disable skip-build flag.
+# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
diff --git a/src/tests/ofc22/descriptors_emulated.json b/src/tests/ofc22/descriptors_emulated.json
index 83f9c39e2ac7154b088ccdd0a1519ea32c1aee1d..a71d454f41f324cabb48a023d6d840a59245800c 100644
--- a/src/tests/ofc22/descriptors_emulated.json
+++ b/src/tests/ofc22/descriptors_emulated.json
@@ -9,70 +9,83 @@
     "topologies": [
         {
             "topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}},
-            "device_ids": [],
-            "link_ids": []
+            "device_ids": [
+                {"device_uuid": {"uuid": "R1-EMU"}},
+                {"device_uuid": {"uuid": "R2-EMU"}},
+                {"device_uuid": {"uuid": "R3-EMU"}},
+                {"device_uuid": {"uuid": "R4-EMU"}},
+                {"device_uuid": {"uuid": "O1-OLS"}}                
+            ],
+            "link_ids": [
+                {"link_uuid": {"uuid": "R1-EMU/13/0/0==O1-OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}},
+                {"link_uuid": {"uuid": "R2-EMU/13/0/0==O1-OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}},
+                {"link_uuid": {"uuid": "R3-EMU/13/0/0==O1-OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}},
+                {"link_uuid": {"uuid": "R4-EMU/13/0/0==O1-OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
+            ]
         }
     ],
     "devices": [
         {
-            "device_id": {"device_uuid": {"uuid": "R1-EMU"}},
-            "device_type": "emu-packet-router",
+            "device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}}
-            ]},
-            "device_operational_status": 1,
-            "device_drivers": [0],
-            "device_endpoints": []
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "13/0/0", "type": "optical", "sample_types": []},
+                    {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
+                ]}}}
+            ]}
         },
         {
-            "device_id": {"device_uuid": {"uuid": "R2-EMU"}},
-            "device_type": "emu-packet-router",
+            "device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}}
-            ]},
-            "device_operational_status": 1,
-            "device_drivers": [0],
-            "device_endpoints": []
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "13/0/0", "type": "optical", "sample_types": []},
+                    {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
+                ]}}}
+            ]}
         },
         {
-            "device_id": {"device_uuid": {"uuid": "R3-EMU"}},
-            "device_type": "emu-packet-router",
+            "device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}}
-            ]},
-            "device_operational_status": 1,
-            "device_drivers": [0],
-            "device_endpoints": []
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "13/0/0", "type": "optical", "sample_types": []},
+                    {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
+                ]}}}
+            ]}
         },
         {
-            "device_id": {"device_uuid": {"uuid": "R4-EMU"}},
-            "device_type": "emu-packet-router",
+            "device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}}
-            ]},
-            "device_operational_status": 1,
-            "device_drivers": [0],
-            "device_endpoints": []
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "13/0/0", "type": "optical", "sample_types": []},
+                    {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
+                ]}}}
+            ]}
         },
         {
-            "device_id": {"device_uuid": {"uuid": "O1-OLS"}},
-            "device_type": "emu-open-line-system",
+            "device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "device_type": "emu-open-line-system",
+            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"aade6001-f00b-5e2f-a357-6a0a9d3de870\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"eb287d83-f05e-53ec-ab5a-adf6bd2b5418\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"0ef74f99-1acc-57bd-ab9d-4b958b06c513\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"50296d99-58cc-5ce7-82f5-fc8ee4eec2ec\"}]}"}}
-            ]},
-            "device_operational_status": 1,
-            "device_drivers": [0],
-            "device_endpoints": []
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870", "type": "optical", "sample_types": []},
+                    {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418", "type": "optical", "sample_types": []},
+                    {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513", "type": "optical", "sample_types": []},
+                    {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec", "type": "optical", "sample_types": []}
+                ]}}}
+            ]}
         }
     ],
     "links": [
diff --git a/src/tests/ofc22/run_test_01_bootstrap.sh b/src/tests/ofc22/run_test_01_bootstrap.sh
index bb740707321b24fc960299f2eac91cc2d9775b64..61b49b251f927ffb2e845f0c9094d30ea597abc6 100755
--- a/src/tests/ofc22/run_test_01_bootstrap.sh
+++ b/src/tests/ofc22/run_test_01_bootstrap.sh
@@ -13,9 +13,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# make sure to source the following scripts:
-# - my_deploy.sh
-# - tfs_runtime_env_vars.sh
-
 source tfs_runtime_env_vars.sh
-pytest --verbose src/tests/ofc22/tests/test_functional_bootstrap.py
+pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_bootstrap.py
diff --git a/src/tests/ofc22/run_test_02_create_service.sh b/src/tests/ofc22/run_test_02_create_service.sh
index 8b6c8658df759bdcb777f83c6c7846d0ea7b48ed..135a3f74fe93d0d7a4da6ef0e02371a040fc1eb3 100755
--- a/src/tests/ofc22/run_test_02_create_service.sh
+++ b/src/tests/ofc22/run_test_02_create_service.sh
@@ -14,4 +14,4 @@
 # limitations under the License.
 
 source tfs_runtime_env_vars.sh
-pytest --verbose src/tests/ofc22/tests/test_functional_create_service.py
+pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_create_service.py
diff --git a/src/tests/ofc22/run_test_03_delete_service.sh b/src/tests/ofc22/run_test_03_delete_service.sh
index 51df41aee216e141b0d2e2f55a0398ecd9cdf35f..cbe6714fe91cf1758f62e697e667568d35578181 100755
--- a/src/tests/ofc22/run_test_03_delete_service.sh
+++ b/src/tests/ofc22/run_test_03_delete_service.sh
@@ -14,4 +14,4 @@
 # limitations under the License.
 
 source tfs_runtime_env_vars.sh
-pytest --verbose src/tests/ofc22/tests/test_functional_delete_service.py
+pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_delete_service.py
diff --git a/src/tests/ofc22/run_test_04_cleanup.sh b/src/tests/ofc22/run_test_04_cleanup.sh
index 2ba91684f9eb49075dd68877e54976f989811ae9..e88ddbd3227b3f29dfc7f126d5853e0b1d0e06f1 100755
--- a/src/tests/ofc22/run_test_04_cleanup.sh
+++ b/src/tests/ofc22/run_test_04_cleanup.sh
@@ -14,4 +14,4 @@
 # limitations under the License.
 
 source tfs_runtime_env_vars.sh
-pytest --verbose src/tests/ofc22/tests/test_functional_cleanup.py
+pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_cleanup.py
diff --git a/src/tests/ofc22/run_tests_and_coverage.sh b/src/tests/ofc22/run_tests.sh
similarity index 77%
rename from src/tests/ofc22/run_tests_and_coverage.sh
rename to src/tests/ofc22/run_tests.sh
index ae956925a430e0eab167bf36a49be59014a2a97b..0ad4be313987b8b5069808873f94840521d4284e 100755
--- a/src/tests/ofc22/run_tests_and_coverage.sh
+++ b/src/tests/ofc22/run_tests.sh
@@ -16,7 +16,6 @@
 
 PROJECTDIR=`pwd`
 
-# cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 COVERAGEFILE=$PROJECTDIR/coverage/.coverage
 
@@ -31,15 +30,15 @@ source tfs_runtime_env_vars.sh
 # Force a flush of Context database
 kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL
 
-# Run functional tests and analyze code coverage at the same time
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+# Run functional tests
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_bootstrap.py
 
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_create_service.py
 
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_delete_service.py
 
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_cleanup.py
diff --git a/src/tests/ofc22/setup_test_env.sh b/src/tests/ofc22/setup_test_env.sh
deleted file mode 100755
index 1f8b0a5a7a8dc986715c6f54a62151f6afa4ad80..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/setup_test_env.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get service/contextservice --namespace tfs  --template '{{.spec.clusterIP}}')
-export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service/contextservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="grpc")].port}')
-export COMPUTESERVICE_SERVICE_HOST=$(kubectl get service/computeservice --namespace tfs  --template '{{.spec.clusterIP}}')
-export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service/computeservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="http")].port}')
-echo "CONTEXTSERVICE_SERVICE_HOST=$CONTEXTSERVICE_SERVICE_HOST"
-echo "CONTEXTSERVICE_SERVICE_PORT_GRPC=$CONTEXTSERVICE_SERVICE_PORT_GRPC"
-echo "COMPUTESERVICE_SERVICE_HOST=$COMPUTESERVICE_SERVICE_HOST"
-echo "COMPUTESERVICE_SERVICE_PORT_HTTP=$COMPUTESERVICE_SERVICE_PORT_HTTP"
diff --git a/src/tests/ofc22/tests/Fixtures.py b/src/tests/ofc22/tests/Fixtures.py
index 370731e5de14b2c7c4acdcfa86eacfa66f2ffd4b..3b35a12e299ba776e909fbdd2739e971431083a6 100644
--- a/src/tests/ofc22/tests/Fixtures.py
+++ b/src/tests/ofc22/tests/Fixtures.py
@@ -12,14 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
+import pytest, logging
 from common.Settings import get_setting
-from compute.tests.mock_osm.MockOSM import MockOSM
-from .Objects import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME
+from tests.tools.mock_osm.Constants import WIM_PASSWORD, WIM_USERNAME
+from tests.tools.mock_osm.MockOSM import MockOSM
+from .Objects import WIM_MAPPING
 
+LOGGER = logging.getLogger(__name__)
 
 @pytest.fixture(scope='session')
 def osm_wim():
     wim_url = 'http://{:s}:{:s}'.format(
         get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP')))
+    LOGGER.info('WIM_MAPPING = {:s}'.format(str(WIM_MAPPING)))
     return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
diff --git a/src/tests/ofc22/tests/Objects.py b/src/tests/ofc22/tests/Objects.py
index d2fb32ebb20b7bcdda9ac12b7a7390c46e6fb1d1..7bfbe9fce558d6a86d965ecb6421369d7f544d4d 100644
--- a/src/tests/ofc22/tests/Objects.py
+++ b/src/tests/ofc22/tests/Objects.py
@@ -12,220 +12,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, List, Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
-from common.tools.object_factory.Context import json_context, json_context_id
-from common.tools.object_factory.Device import (
-    json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled,
-    json_device_emulated_tapi_disabled, json_device_id, json_device_packetrouter_disabled, json_device_tapi_disabled)
-from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
-from common.tools.object_factory.Link import json_link, json_link_id
-from common.tools.object_factory.Topology import json_topology, json_topology_id
-from common.proto.kpi_sample_types_pb2 import KpiSampleType
-
-# ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
-
-# ----- Topology -------------------------------------------------------------------------------------------------------
-TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-
-# ----- Monitoring Samples ---------------------------------------------------------------------------------------------
-PACKET_PORT_SAMPLE_TYPES = [
-    KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED,
-    KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED,
-    KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED,
-    KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED,
-]
-
-# ----- Device Credentials and Settings --------------------------------------------------------------------------------
-try:
-    from .Credentials import DEVICE_R1_ADDRESS, DEVICE_R1_PORT, DEVICE_R1_USERNAME, DEVICE_R1_PASSWORD
-    from .Credentials import DEVICE_R3_ADDRESS, DEVICE_R3_PORT, DEVICE_R3_USERNAME, DEVICE_R3_PASSWORD
-    from .Credentials import DEVICE_O1_ADDRESS, DEVICE_O1_PORT
-    USE_REAL_DEVICES = True     # Use real devices
-except ImportError:
-    USE_REAL_DEVICES = False    # Use emulated devices
-
-    DEVICE_R1_ADDRESS  = '0.0.0.0'
-    DEVICE_R1_PORT     = 830
-    DEVICE_R1_USERNAME = 'admin'
-    DEVICE_R1_PASSWORD = 'admin'
-
-    DEVICE_R3_ADDRESS  = '0.0.0.0'
-    DEVICE_R3_PORT     = 830
-    DEVICE_R3_USERNAME = 'admin'
-    DEVICE_R3_PASSWORD = 'admin'
-
-    DEVICE_O1_ADDRESS  = '0.0.0.0'
-    DEVICE_O1_PORT     = 4900
-
-#USE_REAL_DEVICES = False     # Uncomment to force to use emulated devices
-
-def json_endpoint_ids(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]):
-    return [
-        json_endpoint_id(device_id, ep_uuid, topology_id=None)
-        for ep_uuid, _, _ in endpoint_descriptors
-    ]
-
-def json_endpoints(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]):
-    return [
-        json_endpoint(device_id, ep_uuid, ep_type, topology_id=None, kpi_sample_types=ep_sample_types)
-        for ep_uuid, ep_type, ep_sample_types in endpoint_descriptors
-    ]
-
-def get_link_uuid(a_device_id : Dict, a_endpoint_id : Dict, z_device_id : Dict, z_endpoint_id : Dict) -> str:
-    return '{:s}/{:s}=={:s}/{:s}'.format(
-        a_device_id['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'],
-        z_device_id['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid'])
-
-
-# ----- Devices --------------------------------------------------------------------------------------------------------
-if not USE_REAL_DEVICES:
-    json_device_packetrouter_disabled = json_device_emulated_packet_router_disabled
-    json_device_tapi_disabled         = json_device_emulated_tapi_disabled
-
-DEVICE_R1_UUID          = 'R1-EMU'
-DEVICE_R1_TIMEOUT       = 120
-DEVICE_R1_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)]
-DEVICE_R1_ID            = json_device_id(DEVICE_R1_UUID)
-#DEVICE_R1_ENDPOINTS     = json_endpoints(DEVICE_R1_ID, DEVICE_R1_ENDPOINT_DEFS)
-DEVICE_R1_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_R1_ID, DEVICE_R1_ENDPOINT_DEFS)
-DEVICE_R1               = json_device_packetrouter_disabled(DEVICE_R1_UUID)
-ENDPOINT_ID_R1_13_0_0   = DEVICE_R1_ENDPOINT_IDS[0]
-ENDPOINT_ID_R1_13_1_2   = DEVICE_R1_ENDPOINT_IDS[1]
-DEVICE_R1_CONNECT_RULES = json_device_connect_rules(DEVICE_R1_ADDRESS, DEVICE_R1_PORT, {
-    'username': DEVICE_R1_USERNAME,
-    'password': DEVICE_R1_PASSWORD,
-    'timeout' : DEVICE_R1_TIMEOUT,
-}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_R1_ENDPOINT_DEFS)
-
-
-DEVICE_R2_UUID          = 'R2-EMU'
-DEVICE_R2_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)]
-DEVICE_R2_ID            = json_device_id(DEVICE_R2_UUID)
-#DEVICE_R2_ENDPOINTS     = json_endpoints(DEVICE_R2_ID, DEVICE_R2_ENDPOINT_DEFS)
-DEVICE_R2_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_R2_ID, DEVICE_R2_ENDPOINT_DEFS)
-DEVICE_R2               = json_device_emulated_packet_router_disabled(DEVICE_R2_UUID)
-ENDPOINT_ID_R2_13_0_0   = DEVICE_R2_ENDPOINT_IDS[0]
-ENDPOINT_ID_R2_13_1_2   = DEVICE_R2_ENDPOINT_IDS[1]
-DEVICE_R2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R2_ENDPOINT_DEFS)
-
-
-DEVICE_R3_UUID          = 'R3-EMU'
-DEVICE_R3_TIMEOUT       = 120
-DEVICE_R3_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)]
-DEVICE_R3_ID            = json_device_id(DEVICE_R3_UUID)
-#DEVICE_R3_ENDPOINTS     = json_endpoints(DEVICE_R3_ID, DEVICE_R3_ENDPOINT_DEFS)
-DEVICE_R3_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_R3_ID, DEVICE_R3_ENDPOINT_DEFS)
-DEVICE_R3               = json_device_packetrouter_disabled(DEVICE_R3_UUID)
-ENDPOINT_ID_R3_13_0_0   = DEVICE_R3_ENDPOINT_IDS[0]
-ENDPOINT_ID_R3_13_1_2   = DEVICE_R3_ENDPOINT_IDS[1]
-DEVICE_R3_CONNECT_RULES = json_device_connect_rules(DEVICE_R3_ADDRESS, DEVICE_R3_PORT, {
-    'username': DEVICE_R3_USERNAME,
-    'password': DEVICE_R3_PASSWORD,
-    'timeout' : DEVICE_R3_TIMEOUT,
-}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_R3_ENDPOINT_DEFS)
-
-
-DEVICE_R4_UUID          = 'R4-EMU'
-DEVICE_R4_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)]
-DEVICE_R4_ID            = json_device_id(DEVICE_R4_UUID)
-#DEVICE_R4_ENDPOINTS     = json_endpoints(DEVICE_R4_ID, DEVICE_R4_ENDPOINT_DEFS)
-DEVICE_R4_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_R4_ID, DEVICE_R4_ENDPOINT_DEFS)
-DEVICE_R4               = json_device_emulated_packet_router_disabled(DEVICE_R4_UUID)
-ENDPOINT_ID_R4_13_0_0   = DEVICE_R4_ENDPOINT_IDS[0]
-ENDPOINT_ID_R4_13_1_2   = DEVICE_R4_ENDPOINT_IDS[1]
-DEVICE_R4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R4_ENDPOINT_DEFS)
-
-
-DEVICE_O1_UUID          = 'O1-OLS'
-DEVICE_O1_TIMEOUT       = 120
-DEVICE_O1_ENDPOINT_DEFS = [
-    ('aade6001-f00b-5e2f-a357-6a0a9d3de870', 'optical', []), # node_1_port_13
-    ('eb287d83-f05e-53ec-ab5a-adf6bd2b5418', 'optical', []), # node_2_port_13
-    ('0ef74f99-1acc-57bd-ab9d-4b958b06c513', 'optical', []), # node_3_port_13
-    ('50296d99-58cc-5ce7-82f5-fc8ee4eec2ec', 'optical', []), # node_4_port_13
-]
-DEVICE_O1_ID            = json_device_id(DEVICE_O1_UUID)
-DEVICE_O1               = json_device_tapi_disabled(DEVICE_O1_UUID)
-#DEVICE_O1_ENDPOINTS     = json_endpoints(DEVICE_O1_ID, DEVICE_O1_ENDPOINT_DEFS)
-DEVICE_O1_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_O1_ID, DEVICE_O1_ENDPOINT_DEFS)
-ENDPOINT_ID_O1_EP1      = DEVICE_O1_ENDPOINT_IDS[0]
-ENDPOINT_ID_O1_EP2      = DEVICE_O1_ENDPOINT_IDS[1]
-ENDPOINT_ID_O1_EP3      = DEVICE_O1_ENDPOINT_IDS[2]
-ENDPOINT_ID_O1_EP4      = DEVICE_O1_ENDPOINT_IDS[3]
-DEVICE_O1_CONNECT_RULES = json_device_connect_rules(DEVICE_O1_ADDRESS, DEVICE_O1_PORT, {
-    'timeout' : DEVICE_O1_TIMEOUT,
-}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_O1_ENDPOINT_DEFS)
-
-
-# ----- Links ----------------------------------------------------------------------------------------------------------
-LINK_R1_O1_UUID = get_link_uuid(DEVICE_R1_ID, ENDPOINT_ID_R1_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP1)
-LINK_R1_O1_ID   = json_link_id(LINK_R1_O1_UUID)
-LINK_R1_O1      = json_link(LINK_R1_O1_UUID, [ENDPOINT_ID_R1_13_0_0, ENDPOINT_ID_O1_EP1])
-
-LINK_R2_O1_UUID = get_link_uuid(DEVICE_R2_ID, ENDPOINT_ID_R2_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP2)
-LINK_R2_O1_ID   = json_link_id(LINK_R2_O1_UUID)
-LINK_R2_O1      = json_link(LINK_R2_O1_UUID, [ENDPOINT_ID_R2_13_0_0, ENDPOINT_ID_O1_EP2])
-
-LINK_R3_O1_UUID = get_link_uuid(DEVICE_R3_ID, ENDPOINT_ID_R3_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP3)
-LINK_R3_O1_ID   = json_link_id(LINK_R3_O1_UUID)
-LINK_R3_O1      = json_link(LINK_R3_O1_UUID, [ENDPOINT_ID_R3_13_0_0, ENDPOINT_ID_O1_EP3])
-
-LINK_R4_O1_UUID = get_link_uuid(DEVICE_R4_ID, ENDPOINT_ID_R4_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP4)
-LINK_R4_O1_ID   = json_link_id(LINK_R4_O1_UUID)
-LINK_R4_O1      = json_link(LINK_R4_O1_UUID, [ENDPOINT_ID_R4_13_0_0, ENDPOINT_ID_O1_EP4])
-
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.EndPoint import json_endpoint_id
+from tests.tools.mock_osm.Tools import connection_point, wim_mapping
 
 # ----- WIM Service Settings -------------------------------------------------------------------------------------------
 
-def compose_service_endpoint_id(endpoint_id):
-    device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
-    endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
-    return ':'.join([device_uuid, endpoint_uuid])
-
-WIM_SEP_R1_ID      = compose_service_endpoint_id(ENDPOINT_ID_R1_13_1_2)
-WIM_SEP_R1_SITE_ID = '1'
-WIM_SEP_R1_BEARER  = WIM_SEP_R1_ID
-WIM_SRV_R1_VLAN_ID = 400
+WIM_DC1_SITE_ID     = '1'
+WIM_DC1_DEVICE_ID   = json_device_id('R1-EMU')
+WIM_DC1_ENDPOINT_ID = json_endpoint_id(WIM_DC1_DEVICE_ID, '13/1/2')
 
-WIM_SEP_R3_ID      = compose_service_endpoint_id(ENDPOINT_ID_R3_13_1_2)
-WIM_SEP_R3_SITE_ID = '2'
-WIM_SEP_R3_BEARER  = WIM_SEP_R3_ID
-WIM_SRV_R3_VLAN_ID = 500
+WIM_DC2_SITE_ID     = '2'
+WIM_DC2_DEVICE_ID   = json_device_id('R3-EMU')
+WIM_DC2_ENDPOINT_ID = json_endpoint_id(WIM_DC2_DEVICE_ID, '13/1/2')
 
-WIM_USERNAME = 'admin'
-WIM_PASSWORD = 'admin'
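+# wim_mapping() is assumed to return a (service_endpoint_id, mapping_entry) pair for a given site/endpoint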
+WIM_SEP_DC1, WIM_MAP_DC1 = wim_mapping(WIM_DC1_SITE_ID, WIM_DC1_ENDPOINT_ID)
+WIM_SEP_DC2, WIM_MAP_DC2 = wim_mapping(WIM_DC2_SITE_ID, WIM_DC2_ENDPOINT_ID)
+WIM_MAPPING  = [WIM_MAP_DC1, WIM_MAP_DC2]
 
-WIM_MAPPING  = [
-    {'device-id': DEVICE_R1_UUID, 'service_endpoint_id': WIM_SEP_R1_ID,
-     'service_mapping_info': {'bearer': {'bearer-reference': WIM_SEP_R1_BEARER}, 'site-id': WIM_SEP_R1_SITE_ID}},
-    {'device-id': DEVICE_R3_UUID, 'service_endpoint_id': WIM_SEP_R3_ID,
-     'service_mapping_info': {'bearer': {'bearer-reference': WIM_SEP_R3_BEARER}, 'site-id': WIM_SEP_R3_SITE_ID}},
-]
+WIM_SRV_VLAN_ID = 300
 WIM_SERVICE_TYPE = 'ELINE'
 WIM_SERVICE_CONNECTION_POINTS = [
-    {'service_endpoint_id': WIM_SEP_R1_ID,
-        'service_endpoint_encapsulation_type': 'dot1q',
-        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_R1_VLAN_ID}},
-    {'service_endpoint_id': WIM_SEP_R3_ID,
-        'service_endpoint_encapsulation_type': 'dot1q',
-        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_R3_VLAN_ID}},
+    connection_point(WIM_SEP_DC1, 'dot1q', WIM_SRV_VLAN_ID),
+    connection_point(WIM_SEP_DC2, 'dot1q', WIM_SRV_VLAN_ID),
 ]
-
-# ----- Object Collections ---------------------------------------------------------------------------------------------
-
-CONTEXTS = [CONTEXT]
-TOPOLOGIES = [TOPOLOGY]
-
-DEVICES = [
-    (DEVICE_R1, DEVICE_R1_CONNECT_RULES),
-    (DEVICE_R2, DEVICE_R2_CONNECT_RULES),
-    (DEVICE_R3, DEVICE_R3_CONNECT_RULES),
-    (DEVICE_R4, DEVICE_R4_CONNECT_RULES),
-    (DEVICE_O1, DEVICE_O1_CONNECT_RULES),
-]
-
-LINKS = [LINK_R1_O1, LINK_R2_O1, LINK_R3_O1, LINK_R4_O1]
\ No newline at end of file
diff --git a/src/tests/ofc22/tests/test_functional_bootstrap.py b/src/tests/ofc22/tests/test_functional_bootstrap.py
index 65b7cece1625032d8e02a5962d49d892e29d615a..71deb9d596b1494e148b140902ca927e5d664dd3 100644
--- a/src/tests/ofc22/tests/test_functional_bootstrap.py
+++ b/src/tests/ofc22/tests/test_functional_bootstrap.py
@@ -12,27 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy, logging, pytest
-from common.Settings import get_setting
+import logging, time
+from common.proto.context_pb2 import ContextId, Empty
 from common.proto.monitoring_pb2 import KpiDescriptorList
-from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
+from common.tests.LoadScenario import load_scenario_from_descriptor
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Link import json_link_id
-from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
-from monitoring.client.MonitoringClient import MonitoringClient
-from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology
 from device.client.DeviceClient import DeviceClient
-from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
-from tests.Fixtures import context_client, device_client, monitoring_client
+from monitoring.client.MonitoringClient import MonitoringClient
+from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
+DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
 
-def test_scenario_empty(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+def test_scenario_bootstrap(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,   # pylint: disable=redefined-outer-name
+) -> None:
     # ----- List entities - Ensure database is empty -------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
     assert len(response.contexts) == 0
@@ -44,160 +43,53 @@ def test_scenario_empty(context_client : ContextClient):  # pylint: disable=rede
     assert len(response.links) == 0
 
 
-def test_prepare_scenario(context_client : ContextClient):  # pylint: disable=redefined-outer-name
-
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client)
-    #events_collector.start()
-
-    #expected_events = []
-
-    # ----- Create Contexts and Topologies -----------------------------------------------------------------------------
-    for context in CONTEXTS:
-        context_uuid = context['context_id']['context_uuid']['uuid']
-        LOGGER.info('Adding Context {:s}'.format(context_uuid))
-        response = context_client.SetContext(Context(**context))
-        assert response.context_uuid.uuid == context_uuid
-        #expected_events.append(('ContextEvent', EVENT_CREATE, json_context_id(context_uuid)))
-
-    for topology in TOPOLOGIES:
-        context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid']
-        topology_uuid = topology['topology_id']['topology_uuid']['uuid']
-        LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
-        response = context_client.SetTopology(Topology(**topology))
-        assert response.context_id.context_uuid.uuid == context_uuid
-        assert response.topology_uuid.uuid == topology_uuid
-        context_id = json_context_id(context_uuid)
-        #expected_events.append(('TopologyEvent', EVENT_CREATE, json_topology_id(topology_uuid, context_id=context_id)))
+    # ----- Load Scenario ----------------------------------------------------------------------------------------------
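+    # Feed the descriptor file through the Context and Device clients; the two None arguments
+    # presumably stand for the Service and Slice clients, which bootstrap does not need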
+    descriptor_loader = load_scenario_from_descriptor(
+        DESCRIPTOR_FILE, context_client, device_client, None, None)
 
-    # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    #check_events(events_collector, expected_events)
 
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
-
-
-def test_scenario_ready(context_client : ContextClient):  # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
-
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == 0
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == 0
-
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
-
-
-def test_devices_bootstraping(
-    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
-
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client, log_events_received=True)
-    #events_collector.start()
-
-    #expected_events = []
-
-    # ----- Create Devices and Validate Collected Events ---------------------------------------------------------------
-    for device, connect_rules in DEVICES:
-        device_uuid = device['device_id']['device_uuid']['uuid']
-        LOGGER.info('Adding Device {:s}'.format(device_uuid))
-
-        device_with_connect_rules = copy.deepcopy(device)
-        device_with_connect_rules['device_config']['config_rules'].extend(connect_rules)
-        response = device_client.AddDevice(Device(**device_with_connect_rules))
-        assert response.device_uuid.uuid == device_uuid
-
-        #expected_events.extend([
-        #    # Device creation, update for automation to start the device
-        #    ('DeviceEvent', EVENT_CREATE, json_device_id(device_uuid)),
-        #    #('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)),
-        #])
-
-        #response = context_client.GetDevice(response)
-        #for endpoint in response.device_endpoints:
-        #    for _ in endpoint.kpi_sample_types:
-        #        # Monitoring configures monitoring for endpoint
-        #        expected_events.append(('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)))
-
-    # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    #check_events(events_collector, expected_events)
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
-
-
-def test_devices_bootstrapped(context_client : ContextClient):  # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure bevices are created -----------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
-
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == 0
-
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
-
-
-def test_links_creation(context_client : ContextClient):  # pylint: disable=redefined-outer-name
-
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client)
-    #events_collector.start()
-
-    #expected_events = []
-
-    # ----- Create Links and Validate Collected Events -----------------------------------------------------------------
-    for link in LINKS:
-        link_uuid = link['link_id']['link_uuid']['uuid']
-        LOGGER.info('Adding Link {:s}'.format(link_uuid))
-        response = context_client.SetLink(Link(**link))
-        assert response.link_uuid.uuid == link_uuid
-        #expected_events.append(('LinkEvent', EVENT_CREATE, json_link_id(link_uuid)))
-
-    # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    #check_events(events_collector, expected_events)
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
-
-
-def test_links_created(context_client : ContextClient):  # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure links are created -------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
-
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
+    assert len(response.links) == descriptor_loader.num_links
 
+    for context_uuid, _ in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        assert len(response.services) == 0
 
-def test_scenario_kpis_created(monitoring_client: MonitoringClient):
+def test_scenario_kpis_created(
+    context_client : ContextClient,         # pylint: disable=redefined-outer-name
+    monitoring_client: MonitoringClient,    # pylint: disable=redefined-outer-name
+) -> None:
     """
     This test validates that KPIs related to the service/device/endpoint were created
     during the service creation process.
     """
-    response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty())
-    # TODO: replace the magic number `16` below for a formula that adapts to the number
-    # of links and devices
-    assert len(response.kpi_descriptor_list) >= 16
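+    # Derive the expected KPI count: one KPI per (device, endpoint, kpi_sample_type) tuple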
+    response = context_client.ListDevices(Empty())
+    kpis_expected = set()
+    for device in response.devices:
+        device_uuid = device.device_id.device_uuid.uuid
+        for endpoint in device.device_endpoints:
+            endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+            for kpi_sample_type in endpoint.kpi_sample_types:
+                kpis_expected.add((device_uuid, endpoint_uuid, kpi_sample_type))
+    num_kpis_expected = len(kpis_expected)
+    LOGGER.info('Num KPIs expected: {:d}'.format(num_kpis_expected))
+
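+    # Poll Monitoring (up to 5 attempts, 0.5 s apart) until all expected KPI descriptors show up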
+    num_kpis_created, num_retry = 0, 0
+    while (num_kpis_created != num_kpis_expected) and (num_retry < 5):
+        response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty())
+        num_kpis_created = len(response.kpi_descriptor_list)
+        LOGGER.info('Num KPIs created: {:d}'.format(num_kpis_created))
+        time.sleep(0.5)
+        num_retry += 1
+    assert num_kpis_created == num_kpis_expected
diff --git a/src/tests/ofc22/tests/test_functional_cleanup.py b/src/tests/ofc22/tests/test_functional_cleanup.py
index b0dfe54900f5a806607fcd669942e7fa592dcbaa..be807eaa0242f2363b5b6c189ce4de264528a54c 100644
--- a/src/tests/ofc22/tests/test_functional_cleanup.py
+++ b/src/tests/ofc22/tests/test_functional_cleanup.py
@@ -12,93 +12,63 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, pytest
-from common.Settings import get_setting
-from common.tests.EventTools import EVENT_REMOVE, check_events
+import logging
+from common.tools.descriptor.Loader import DescriptorLoader
 from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Link import json_link_id
-from common.tools.object_factory.Topology import json_topology_id
-from context.client.ContextClient import ContextClient
-from context.client.EventsCollector import EventsCollector
 from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId
+from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
-from tests.Fixtures import context_client, device_client
-from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+from tests.Fixtures import context_client, device_client    # pylint: disable=unused-import
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
+DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
 
-def test_services_removed(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+
+def test_services_removed(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,   # pylint: disable=redefined-outer-name
+) -> None:
     # ----- List entities - Ensure service is removed ------------------------------------------------------------------
+    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
+        descriptors = f.read()
+
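+    # Parse the descriptors to learn which entities exist, both for the checks and for the removals below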
+    descriptor_loader = DescriptorLoader(descriptors)
+
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
-
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
-
-
-def test_scenario_cleanup(
-    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
-
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client)
-    #events_collector.start()
-
-    #expected_events = []
-
-    # ----- Delete Links and Validate Collected Events -----------------------------------------------------------------
-    for link in LINKS:
-        link_id = link['link_id']
-        link_uuid = link_id['link_uuid']['uuid']
-        LOGGER.info('Deleting Link {:s}'.format(link_uuid))
-        context_client.RemoveLink(LinkId(**link_id))
-        #expected_events.append(('LinkEvent', EVENT_REMOVE, json_link_id(link_uuid)))
-
-    # ----- Delete Devices and Validate Collected Events ---------------------------------------------------------------
-    for device, _ in DEVICES:
-        device_id = device['device_id']
-        device_uuid = device_id['device_uuid']['uuid']
-        LOGGER.info('Deleting Device {:s}'.format(device_uuid))
-        device_client.DeleteDevice(DeviceId(**device_id))
-        #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid)))
-
-    # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------
-    for topology in TOPOLOGIES:
-        topology_id = topology['topology_id']
-        context_uuid = topology_id['context_id']['context_uuid']['uuid']
-        topology_uuid = topology_id['topology_uuid']['uuid']
-        LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
-        context_client.RemoveTopology(TopologyId(**topology_id))
-        context_id = json_context_id(context_uuid)
-        #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id)))
-
-    # ----- Delete Contexts and Validate Collected Events --------------------------------------------------------------
-    for context in CONTEXTS:
-        context_id = context['context_id']
-        context_uuid = context_id['context_uuid']['uuid']
-        LOGGER.info('Deleting Context {:s}'.format(context_uuid))
-        context_client.RemoveContext(ContextId(**context_id))
-        #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid)))
-
-    # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    #check_events(events_collector, expected_events)
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
-
-
-def test_scenario_empty_again(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    assert len(response.links) == descriptor_loader.num_links
+
+    for context_uuid, _ in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        assert len(response.services) == 0
+
+
+    # ----- Delete Links, Devices, Topologies, Contexts ----------------------------------------------------------------
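+    # Remove entities bottom-up: links first, then devices, then topologies, and finally contexts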
+    for link in descriptor_loader.links:
+        context_client.RemoveLink(LinkId(**link['link_id']))
+
+    for device in descriptor_loader.devices:
+        device_client .DeleteDevice(DeviceId(**device['device_id']))
+
+    for context_uuid, topology_list in descriptor_loader.topologies.items():
+        for topology in topology_list:
+            context_client.RemoveTopology(TopologyId(**topology['topology_id']))
+
+    for context in descriptor_loader.contexts:
+        context_client.RemoveContext(ContextId(**context['context_id']))
+
+
     # ----- List entities - Ensure database is empty again -------------------------------------------------------------
     response = context_client.ListContexts(Empty())
     assert len(response.contexts) == 0
diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py
index 5615f119b91fba10dd767d7188b303f926750e06..e606d060d52631ba72e191d7c025bd7b43048b39 100644
--- a/src/tests/ofc22/tests/test_functional_create_service.py
+++ b/src/tests/ofc22/tests/test_functional_create_service.py
@@ -12,24 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, pytest, random, time
+import logging, random
 from common.DeviceTypes import DeviceTypeEnum
-from common.Settings import get_setting
-from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
-from common.tools.object_factory.Connection import json_connection_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Service import json_service_id
+from common.proto.context_pb2 import ContextId, Empty
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.tools.descriptor.Loader import DescriptorLoader
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from compute.tests.mock_osm.MockOSM import MockOSM
+from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from monitoring.client.MonitoringClient import MonitoringClient
-from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import ContextId, Empty
-from tests.Fixtures import context_client, monitoring_client
-from .Fixtures import osm_wim
-from .Objects import (
-    CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES,
-    WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
+from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import
+from tests.tools.mock_osm.MockOSM import MockOSM
+from .Fixtures import osm_wim # pylint: disable=unused-import
+from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
@@ -37,89 +32,69 @@ LOGGER.setLevel(logging.DEBUG)
 DEVTYPE_EMU_PR  = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
 DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value
 
+DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
+
+def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
+    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
+        descriptors = f.read()
+
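+    # Parse the descriptors to obtain the expected entity counts used in the checks below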
+    descriptor_loader = DescriptorLoader(descriptors)
 
-def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure links are created -------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
+    assert len(response.links) == descriptor_loader.num_links
 
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
+    for context_uuid, num_services in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        assert len(response.services) == 0
 
 
-def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    # TODO: restablish the tests of the events
-    # events_collector = EventsCollector(context_client, log_events_received=True)
-    # events_collector.start()
-
     # ----- Create Service ---------------------------------------------------------------------------------------------
     service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
     osm_wim.get_connectivity_service_status(service_uuid)
 
-    # ----- Validate collected events ----------------------------------------------------------------------------------
-
-    # packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
-    # optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
-    # optical_service_uuid = '{:s}:optical'.format(service_uuid)
-
-    # expected_events = [
-    #    # Create packet service and add first endpoint
-    #    ('ServiceEvent',    EVENT_CREATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-    #    ('ServiceEvent',    EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-    
-    #    # Configure OLS controller, create optical service, create optical connection
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
-    #    ('ServiceEvent',    EVENT_CREATE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
-    #    ('ConnectionEvent', EVENT_CREATE, json_connection_id(optical_connection_uuid)),
-    
-    #    # Configure endpoint packet devices, add second endpoint to service, create connection
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
-    #    ('ServiceEvent',    EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-    #    ('ConnectionEvent', EVENT_CREATE, json_connection_id(packet_connection_uuid)),
-    # ]
-    # check_events(events_collector, expected_events)
-
-    # # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    # events_collector.stop()
-
-
-def test_scenario_service_created(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+
     # ----- List entities - Ensure service is created ------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
+    assert len(response.links) == descriptor_loader.num_links
+
+    for context_uuid, num_services in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+        assert len(response.services) == 2*num_services # 2 per request: L3NM (packet) + underlying TAPI (optical)
 
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-    assert len(response.services) == 2 # L3NM + TAPI
-    for service in response.services:
-        service_id = service.service_id
-        response = context_client.ListConnections(service_id)
-        LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
-            grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
-        assert len(response.connections) == 1 # one connection per service
+        for service in response.services:
+            service_id = service.service_id
+            response = context_client.ListConnections(service_id)
+            LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+                grpc_message_to_json_string(service_id), len(response.connections),
+                grpc_message_to_json_string(response)))
+            assert len(response.connections) == 1 # one connection per service
 
 
-def test_scenario_kpi_values_created(monitoring_client: MonitoringClient):
+def test_scenario_kpi_values_created(
+    monitoring_client: MonitoringClient,    # pylint: disable=redefined-outer-name
+) -> None:
     """
     This test validates that KPI values have been inserted into the monitoring database.
     We randomly sample k KPI descriptors to test.
@@ -128,6 +103,24 @@ def test_scenario_kpi_values_created(monitoring_client: MonitoringClient):
     kpi_descriptors = random.choices(response.kpi_descriptor_list, k=2)
 
     for kpi_descriptor in kpi_descriptors:
+        MSG = 'KPI(kpi_uuid={:s}, device_uuid={:s}, endpoint_uuid={:s}, service_uuid={:s}, kpi_sample_type={:s})...'
+        LOGGER.info(MSG.format(
+            str(kpi_descriptor.kpi_id.kpi_id.uuid), str(kpi_descriptor.device_id.device_uuid.uuid),
+            str(kpi_descriptor.endpoint_id.endpoint_uuid.uuid), str(kpi_descriptor.service_id.service_uuid.uuid),
+            str(KpiSampleType.Name(kpi_descriptor.kpi_sample_type))))
         response = monitoring_client.GetInstantKpi(kpi_descriptor.kpi_id)
-        assert response.kpi_id.kpi_id.uuid == kpi_descriptor.kpi_id.kpi_id.uuid
-        assert response.timestamp.timestamp > 0
+        kpi_uuid = response.kpi_id.kpi_id.uuid
+        assert kpi_uuid == kpi_descriptor.kpi_id.kpi_id.uuid
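+        # kpi_value is a protobuf 'oneof'; WhichOneof('value') returns the name of
+        # the populated field, or None when no instant value has been collected yet.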
+        kpi_value_type = response.kpi_value.WhichOneof('value')
+        if kpi_value_type is None:
+            MSG = '  KPI({:s}): No instant value found'
+            LOGGER.warning(MSG.format(str(kpi_uuid)))
+        else:
+            kpi_timestamp = response.timestamp.timestamp
+            assert kpi_timestamp > 0
+            assert kpi_value_type == 'floatVal'
+            kpi_value = getattr(response.kpi_value, kpi_value_type)
+            MSG = '  KPI({:s}): timestamp={:s} value_type={:s} value={:s}'
+            LOGGER.info(MSG.format(str(kpi_uuid), str(kpi_timestamp), str(kpi_value_type), str(kpi_value)))
diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py
index 5d9568cd81906ac76b600a2253a5e0bdf741bc01..0f8d088012bed164e4603a813bfe9154eda8f568 100644
--- a/src/tests/ofc22/tests/test_functional_delete_service.py
+++ b/src/tests/ofc22/tests/test_functional_delete_service.py
@@ -12,23 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, pytest
+import logging
+from common.Constants import DEFAULT_CONTEXT_UUID
 from common.DeviceTypes import DeviceTypeEnum
-from common.Settings import get_setting
-from common.tests.EventTools import EVENT_REMOVE, EVENT_UPDATE, check_events
-from common.tools.object_factory.Connection import json_connection_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Service import json_service_id
+from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
+from common.tools.descriptor.Loader import DescriptorLoader
+from common.tools.object_factory.Context import json_context_id
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from compute.tests.mock_osm.MockOSM import MockOSM
 from context.client.ContextClient import ContextClient
-from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
-from tests.Fixtures import context_client
-from .Fixtures import osm_wim
-from .Objects import (
-    CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING,
-    WIM_PASSWORD, WIM_USERNAME)
+from tests.Fixtures import context_client   # pylint: disable=unused-import
+from tests.tools.mock_osm.MockOSM import MockOSM
+from .Fixtures import osm_wim # pylint: disable=unused-import
 
 
 LOGGER = logging.getLogger(__name__)
@@ -37,86 +31,71 @@ LOGGER.setLevel(logging.DEBUG)
 DEVTYPE_EMU_PR  = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
 DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value
 
+DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
 
-def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+
+def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure service is created ------------------------------------------------------------------
+    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
+        descriptors = f.read()
+
+    descriptor_loader = DescriptorLoader(descriptors)
+
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
+    assert len(response.links) == descriptor_loader.num_links
 
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-    assert len(response.services) == 2 # L3NM + TAPI
+    l3nm_service_uuids = set()
+    response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)))
+    assert len(response.services) == 2 # 2 per request: L3NM (packet) + underlying TAPI (optical)
     for service in response.services:
         service_id = service.service_id
+
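+        # Collect the UUIDs of the L3NM services and seed MockOSM's per-service
+        # connection info so delete_connectivity_service() can resolve them later.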
+        if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
+            service_uuid = service_id.service_uuid.uuid
+            l3nm_service_uuids.add(service_uuid)
+            osm_wim.conn_info[service_uuid] = {}
+
         response = context_client.ListConnections(service_id)
         LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
-            grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
+            grpc_message_to_json_string(service_id), len(response.connections),
+            grpc_message_to_json_string(response)))
         assert len(response.connections) == 1 # one connection per service
 
+    # Identify service to delete
+    assert len(l3nm_service_uuids) == 1  # assume a single L3NM service has been created
+    l3nm_service_uuid = l3nm_service_uuids.pop()
 
-def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client, log_events_received=True)
-    #events_collector.start()
 
     # ----- Delete Service ---------------------------------------------------------------------------------------------
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-    assert len(response.services) == 2 # L3NM + TAPI
-    service_uuids = set()
-    for service in response.services:
-        if service.service_type != ServiceTypeEnum.SERVICETYPE_L3NM: continue
-        service_uuid = service.service_id.service_uuid.uuid
-        service_uuids.add(service_uuid)
-        osm_wim.conn_info[service_uuid] = {}
-
-    assert len(service_uuids) == 1  # assume a single L3NM service has been created
-    service_uuid = set(service_uuids).pop()
-
-    osm_wim.delete_connectivity_service(service_uuid)
-
-    # ----- Validate collected events ----------------------------------------------------------------------------------
-    #packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
-    #optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
-    #optical_service_uuid = '{:s}:optical'.format(service_uuid)
-
-    #expected_events = [
-    #    ('ConnectionEvent', EVENT_REMOVE, json_connection_id(packet_connection_uuid)),
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
-    #    ('ServiceEvent',    EVENT_REMOVE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-    #    ('ConnectionEvent', EVENT_REMOVE, json_connection_id(optical_connection_uuid)),
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
-    #    ('ServiceEvent',    EVENT_REMOVE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
-    #]
-    #check_events(events_collector, expected_events)
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
+    osm_wim.delete_connectivity_service(l3nm_service_uuid)
 
 
-def test_services_removed(context_client : ContextClient):  # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure service is removed ------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
+    assert len(response.links) == descriptor_loader.num_links
 
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
+    for context_uuid, num_services in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        assert len(response.services) == 0
diff --git a/src/tests/tools/__init__.py b/src/tests/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/tests/tools/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/tools/mock_osm/Constants.py b/src/tests/tools/mock_osm/Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..44d74169f0fd68073ca4ed5272f3dc7ef3ebf958
--- /dev/null
+++ b/src/tests/tools/mock_osm/Constants.py
@@ -0,0 +1,16 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+WIM_USERNAME = 'admin'
+WIM_PASSWORD = 'admin'
diff --git a/src/compute/tests/mock_osm/MockOSM.py b/src/tests/tools/mock_osm/MockOSM.py
similarity index 100%
rename from src/compute/tests/mock_osm/MockOSM.py
rename to src/tests/tools/mock_osm/MockOSM.py
diff --git a/src/tests/tools/mock_osm/Tools.py b/src/tests/tools/mock_osm/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..25a8b6111443424e8bfd2b35501b96a9a762325f
--- /dev/null
+++ b/src/tests/tools/mock_osm/Tools.py
@@ -0,0 +1,52 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Optional
+
+def compose_service_endpoint_id(site_id : str, endpoint_id : Dict):
+    device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
+    endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
+    return ':'.join([site_id, device_uuid, endpoint_uuid])
+
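+# Compose the WIM mapping entry for a connectivity-service endpoint. The bearer
+# reference is '<ce_device>:<ce_endpoint>' when no PE device is given, or
+# '<ce_device>:<pe_device>' otherwise.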
+def wim_mapping(site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, priority=None, redundant : Optional[list] = None):
+    ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid']
+    ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid']
+    service_endpoint_id = compose_service_endpoint_id(site_id, ce_endpoint_id)
+    if pe_device_id is None:
+        bearer = '{:s}:{:s}'.format(ce_device_uuid, ce_endpoint_uuid)
+    else:
+        pe_device_uuid = pe_device_id['device_uuid']['uuid']
+        bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid)
+    mapping = {
+        'service_endpoint_id': service_endpoint_id,
+        'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid,
+        'service_mapping_info': {
+            'site-id': site_id,
+            'bearer': {'bearer-reference': bearer},
+        }
+    }
+    if priority is not None: mapping['service_mapping_info']['priority'] = priority
+    if redundant: mapping['service_mapping_info']['redundant'] = redundant
+    return service_endpoint_id, mapping
+
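+# Build a connection-point descriptor with VLAN encapsulation for the given service endpoint.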
+def connection_point(service_endpoint_id : str, encapsulation_type : str, vlan_id : int):
+    return {
+        'service_endpoint_id': service_endpoint_id,
+        'service_endpoint_encapsulation_type': encapsulation_type,
+        'service_endpoint_encapsulation_info': {'vlan': vlan_id}
+    }
diff --git a/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py b/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py
similarity index 100%
rename from src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py
rename to src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py
diff --git a/src/tests/tools/mock_osm/__init__.py b/src/tests/tools/mock_osm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/tests/tools/mock_osm/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/compute/tests/mock_osm/acknowledgements.txt b/src/tests/tools/mock_osm/acknowledgements.txt
similarity index 100%
rename from src/compute/tests/mock_osm/acknowledgements.txt
rename to src/tests/tools/mock_osm/acknowledgements.txt
diff --git a/src/compute/tests/mock_osm/sdnconn.py b/src/tests/tools/mock_osm/sdnconn.py
similarity index 100%
rename from src/compute/tests/mock_osm/sdnconn.py
rename to src/tests/tools/mock_osm/sdnconn.py
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index 979d0664bc42221e3559eef498bd53562fe073e7..0e008734730867bca741d748c49e3b0589b40e48 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -14,8 +14,8 @@
 
 import json, logging, re
 from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
-from common.proto.context_pb2 import (
-    Connection, Context, Device, Empty, Link, Service, Slice, Topology, ContextIdList, TopologyId, TopologyIdList)
+from common.proto.context_pb2 import Empty, ContextIdList, TopologyId, TopologyIdList
+from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology_id
@@ -23,9 +23,6 @@ from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
-from webui.service.main.DescriptorTools import (
-    format_custom_config_rules, get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices,
-    get_descriptors_add_topologies, split_devices_by_rules)
 from webui.service.main.forms import ContextTopologyForm, DescriptorForm
 
 main = Blueprint('main', __name__)
@@ -37,38 +34,6 @@ slice_client = SliceClient()
 
 logger = logging.getLogger(__name__)
 
-ENTITY_TO_TEXT = {
-    # name   => singular,    plural
-    'context'   : ('Context',    'Contexts'   ),
-    'topology'  : ('Topology',   'Topologies' ),
-    'device'    : ('Device',     'Devices'    ),
-    'link'      : ('Link',       'Links'      ),
-    'service'   : ('Service',    'Services'   ),
-    'slice'     : ('Slice',      'Slices'     ),
-    'connection': ('Connection', 'Connections'),
-}
-
-ACTION_TO_TEXT = {
-    # action =>  infinitive,  past
-    'add'     : ('Add',       'Added'),
-    'update'  : ('Update',    'Updated'),
-    'config'  : ('Configure', 'Configured'),
-}
-
-def process_descriptor(entity_name, action_name, grpc_method, grpc_class, entities):
-    entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name]
-    action_infinitive, action_past = ACTION_TO_TEXT[action_name]
-    num_ok, num_err = 0, 0
-    for entity in entities:
-        try:
-            grpc_method(grpc_class(**entity))
-            num_ok += 1
-        except Exception as e: # pylint: disable=broad-except
-            flash(f'Unable to {action_infinitive} {entity_name_singluar} {str(entity)}: {str(e)}', 'error')
-            num_err += 1
-    if num_ok : flash(f'{str(num_ok)} {entity_name_plural} {action_past}', 'success')
-    if num_err: flash(f'{str(num_err)} {entity_name_plural} failed', 'danger')
-
 def process_descriptors(descriptors):
     try:
         descriptors_file = request.files[descriptors.name]
@@ -78,80 +43,12 @@
         flash(f'Unable to load descriptor file: {str(e)}', 'danger')
         return
 
-    dummy_mode  = descriptors.get('dummy_mode' , False)
-    contexts    = descriptors.get('contexts'   , [])
-    topologies  = descriptors.get('topologies' , [])
-    devices     = descriptors.get('devices'    , [])
-    links       = descriptors.get('links'      , [])
-    services    = descriptors.get('services'   , [])
-    slices      = descriptors.get('slices'     , [])
-    connections = descriptors.get('connections', [])
-
-    # Format CustomConfigRules in Devices, Services and Slices provided in JSON format
-    for device in devices:
-        config_rules = device.get('device_config', {}).get('config_rules', [])
-        config_rules = format_custom_config_rules(config_rules)
-        device['device_config']['config_rules'] = config_rules
-
-    for service in services:
-        config_rules = service.get('service_config', {}).get('config_rules', [])
-        config_rules = format_custom_config_rules(config_rules)
-        service['service_config']['config_rules'] = config_rules
-
-    for slice in slices:
-        config_rules = slice.get('slice_config', {}).get('config_rules', [])
-        config_rules = format_custom_config_rules(config_rules)
-        slice['slice_config']['config_rules'] = config_rules
-
-
-    # Context and Topology require to create the entity first, and add devices, links, services, slices, etc. in a
-    # second stage.
-    contexts_add = get_descriptors_add_contexts(contexts)
-    topologies_add = get_descriptors_add_topologies(topologies)
-
-    if dummy_mode:
-        # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
-        context_client.connect()
-        process_descriptor('context',    'add',    context_client.SetContext,    Context,    contexts_add  )
-        process_descriptor('topology',   'add',    context_client.SetTopology,   Topology,   topologies_add)
-        process_descriptor('device',     'add',    context_client.SetDevice,     Device,     devices       )
-        process_descriptor('link',       'add',    context_client.SetLink,       Link,       links         )
-        process_descriptor('service',    'add',    context_client.SetService,    Service,    services      )
-        process_descriptor('slice',      'add',    context_client.SetSlice,      Slice,      slices        )
-        process_descriptor('connection', 'add',    context_client.SetConnection, Connection, connections   )
-        process_descriptor('context',    'update', context_client.SetContext,    Context,    contexts      )
-        process_descriptor('topology',   'update', context_client.SetTopology,   Topology,   topologies    )
-        context_client.close()
-    else:
-        # Normal mode: follows the automated workflows in the different components
-        assert len(connections) == 0, 'in normal mode, connections should not be set'
-
-        # Device, Service and Slice require to first create the entity and the configure it
-        devices_add, devices_config = split_devices_by_rules(devices)
-        services_add = get_descriptors_add_services(services)
-        slices_add = get_descriptors_add_slices(slices)
-
-        context_client.connect()
-        device_client.connect()
-        service_client.connect()
-        slice_client.connect()
-
-        process_descriptor('context',    'add',    context_client.SetContext,      Context,    contexts_add  )
-        process_descriptor('topology',   'add',    context_client.SetTopology,     Topology,   topologies_add)
-        process_descriptor('device',     'add',    device_client .AddDevice,       Device,     devices_add   )
-        process_descriptor('device',     'config', device_client .ConfigureDevice, Device,     devices_config)
-        process_descriptor('link',       'add',    context_client.SetLink,         Link,       links         )
-        process_descriptor('service',    'add',    service_client.CreateService,   Service,    services_add  )
-        process_descriptor('service',    'update', service_client.UpdateService,   Service,    services      )
-        process_descriptor('slice',      'add',    slice_client  .CreateSlice,     Slice,      slices_add    )
-        process_descriptor('slice',      'update', slice_client  .UpdateSlice,     Slice,      slices        )
-        process_descriptor('context',    'update', context_client.SetContext,      Context,    contexts      )
-        process_descriptor('topology',   'update', context_client.SetTopology,     Topology,   topologies    )
-
-        slice_client.close()
-        service_client.close()
-        device_client.close()
-        context_client.close()
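+    # The shared DescriptorLoader now encapsulates the onboarding workflow removed
+    # above (dummy vs. normal mode, entity ordering, config-rule formatting).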
+    descriptor_loader = DescriptorLoader(descriptors)
+    results = descriptor_loader.process()
+    for message,level in compose_notifications(results):
+        flash(message, level)
 
 @main.route('/', methods=['GET', 'POST'])
 def home():
@@ -191,7 +86,7 @@ def home():
         if descriptor_form.validate_on_submit():
             process_descriptors(descriptor_form.descriptors)
             return redirect(url_for("main.home"))
-    except Exception as e:
+    except Exception as e: # pylint: disable=broad-except
         logger.exception('Descriptor load failed')
         flash(f'Descriptor load failed: `{str(e)}`', 'danger')
     finally:
diff --git a/tutorial/2-2-ofc22.md b/tutorial/2-2-ofc22.md
index 3b55a0961da78fdc78a8feb31499608589b9d0be..04d585d24cc046e6a1aadc1c93118a1b36855aca 100644
--- a/tutorial/2-2-ofc22.md
+++ b/tutorial/2-2-ofc22.md
@@ -37,9 +37,6 @@ environment and a TeraFlowSDN controller instance as described in the
 [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the Python
 environment as described in
 [Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md).
-Remember to source the scenario settings, e.g., `cd ~/tfs-ctrl && source ofc22/deploy_specs.sh` in each terminal you open.
-Then, re-build the protocol buffers code from the proto files:
-`./proto/generate_code_python.sh`
 
 
 ## 2.2.4. Access to the WebUI and Dashboard
@@ -55,25 +52,33 @@ Notes:
 
 ## 2.2.5. Test execution
 
-Before executing the tests, the environment variables need to be prepared. 
-First, make sure to load your deployment variables by:
+Before executing the tests, we need to prepare a few things.
+
+First, you need to make sure that you have all the gRPC-generated code in your folder.
+To do so, run:
 
 ```
-source my_deploy.sh
+proto/generate_code_python.sh
 ```
 
-Then, you also need to load the environment variables to support the execution of the 
-tests by:
+Then, it is time to deploy TeraFlowSDN with the correct specification for this scenario.
+Load the corresponding deployment variables by running:
 
 ```
-source tfs_runtime_env_vars.sh
+source ofc22/deploy_specs.sh
 ```
 
-You also need to make sure that you have all the gRPC-generate code in your folder.
-To do so, run:
+Then, you need to deploy the components by running:
 
 ```
-proto/generate_code_python.sh
+./deploy.sh
+```
+
+After the deployment is finished, you need to load the environment variables that
+support the execution of the tests:
+
+```
+source tfs_runtime_env_vars.sh
 ```
 
 To execute this functional test, four main steps need to be carried out:
@@ -90,8 +95,24 @@ See the troubleshooting section if needed.
 You can check the logs of the different components using the appropriate `scripts/show_logs_[component].sh` scripts
 after you execute each step.
 
+There are two ways to execute the functional tests: *running all the tests with a single script* or *running each test independently*.
+In the following, we start with the first option and then explain how to run each test independently.
+
+
+### 2.2.5.1. Running all tests with a single script
+
+We have a script that executes all the steps at once.
+It is meant to verify that all the components involved in this scenario are working correctly.
+To run all the functional tests, you can run:
+
+```
+ofc22/run_tests_and_coverage.sh
+```
+
+The following sections explain each one of the steps.
 
-### 2.2.5.1. Device bootstrapping
+
+### 2.2.5.2. Device bootstrapping
 
 This step configures some basic entities (Context and Topology), the devices, and the 
 links in the topology.
@@ -103,7 +124,11 @@ The expected results are:
 
 To run this step, you can do it from the WebUI by uploading the file `./ofc22/tests/descriptors_emulated.json` that
 contains the descriptors of the contexts, topologies, devices, and links, or by 
-executing the `./ofc22/run_test_01_bootstrap.sh` script.
+executing the script:
+
+```
+./ofc22/run_test_01_bootstrap.sh
+```
 
 When the bootstrapping finishes, check the Grafana L3-Monitoring Dashboard and you 
 should see the monitoring data being plotted and updated every 5 seconds (by default). 
@@ -117,12 +142,16 @@ Note here that the emulated devices produce synthetic randomly-generated monitor
 and do not represent any particular services configured.
 
 
-### 2.2.5.2. L3VPN Service creation
+### 2.2.5.3. L3VPN Service creation
 
 This step configures a new service, using a Mock OSM instance to emulate the 
 request an OSM WIM would make.
 
-To run this step, execute the `./ofc22/run_test_02_create_service.sh` script.
+To run this step, execute the script:
+
+```
+./ofc22/run_test_02_create_service.sh
+```
 
 When the script finishes, check the WebUI *Services* tab. You should see that two 
 services have been created, one for the optical layer and another for the packet layer. 
@@ -133,13 +162,18 @@ the plots with the monitored data for the device.
 By default, device R1-EMU is selected.
 
 
-### 2.2.5.3. L3VPN Service removal
+### 2.2.5.4. L3VPN Service removal
 
 This step deconfigures the previously created services, using a Mock OSM instance 
 to emulate the request an OSM WIM would make.
 
-To run this step, execute the `./ofc22/run_test_03_delete_service.sh` script, or delete 
-the L3NM service from the WebUI.
+To run this step, execute the script:
+
+```
+./ofc22/run_test_03_delete_service.sh
+```
+
+or delete the L3NM service from the WebUI.
 
 When the script finishes, check the WebUI *Services* tab.
 You should see that the two services have been removed.
@@ -149,12 +183,16 @@ In the Grafana Dashboard, given that there is no service configured, you should
 0-valued flat plot again.
 
 
-### 2.2.5.4. Cleanup
+### 2.2.5.5. Cleanup
 
 This last step performs a cleanup of the scenario, removing all the TeraFlowSDN entities 
 for completeness.
 
-To run this step, execute the `./ofc22/run_test_04_cleanup.sh` script.
+To run this step, execute the script:
+
+```
+./ofc22/run_test_04_cleanup.sh
+```
 
 When the script finishes, check the WebUI *Devices* tab; you should see that the devices 
 have been removed.