diff --git a/scripts/show_logs_monitoring.sh b/scripts/show_logs_monitoring.sh
index 520a9da1c652553eb90acd083caf5724275f4efe..faa825fdfae2bb85f0790a877b75d533ff5aa0d5 100755
--- a/scripts/show_logs_monitoring.sh
+++ b/scripts/show_logs_monitoring.sh
@@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 # Automated steps start here
 ########################################################################################################################
 
-kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringserver
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice server
diff --git a/src/common/tests/LoadScenario.py b/src/common/tests/LoadScenario.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c3940e67b5772f3ba3ec0634c49f26b92bbc571
--- /dev/null
+++ b/src/common/tests/LoadScenario.py
@@ -0,0 +1,50 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from slice.client.SliceClient import SliceClient
+
+LOGGER = logging.getLogger(__name__)
+LOGGERS = {
+    'success': LOGGER.info,
+    'danger' : LOGGER.error,
+    'error'  : LOGGER.error,
+}
+
+def load_scenario_from_descriptor(
+    descriptor_file : str, context_client : ContextClient, device_client : DeviceClient,
+    service_client : ServiceClient, slice_client : SliceClient
+) -> DescriptorLoader:
+    with open(descriptor_file, 'r', encoding='UTF-8') as f:
+        descriptors = f.read()
+
+    descriptor_loader = DescriptorLoader(
+        descriptors,
+        context_client=context_client, device_client=device_client,
+        service_client=service_client, slice_client=slice_client)
+    results = descriptor_loader.process()
+
+    num_errors = 0
+    for message,level in compose_notifications(results):
+        LOGGERS.get(level)(message)
+        if level != 'success': num_errors += 1
+    if num_errors > 0:
+        MSG = 'Failed to load descriptors in file {:s}'
+        raise Exception(MSG.format(str(descriptor_file)))
+
+    return descriptor_loader
\ No newline at end of file
diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py
index c47db248e61485e314703a43ce3cd535409cdea7..0a202ccd810ed50beca4bb9a7b4441305623f1ed 100644
--- a/src/common/tools/context_queries/InterDomain.py
+++ b/src/common/tools/context_queries/InterDomain.py
@@ -16,13 +16,13 @@ import logging
 from typing import Dict, List, Set, Tuple
 from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
 from common.DeviceTypes import DeviceTypeEnum
-from common.proto.context_pb2 import ContextId, Device, Empty, EndPointId, ServiceTypeEnum, Slice, TopologyId
+from common.proto.context_pb2 import ContextId, Device, Empty, EndPointId, ServiceTypeEnum, Slice
 from common.proto.pathcomp_pb2 import PathCompRequest
 from common.tools.context_queries.CheckType import device_type_is_network
-from common.tools.context_queries.Device import get_devices_in_topology, get_uuids_of_devices_in_topology
+from common.tools.context_queries.Device import get_devices_in_topology
+from common.tools.context_queries.Topology import get_topology
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient
 
@@ -60,8 +60,13 @@ def get_local_device_uuids(context_client : ContextClient) -> Set[str]:
     return local_device_uuids
 
 def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]:
-    interdomain_topology_id = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_UUID, context_id=ADMIN_CONTEXT_ID))
-    interdomain_topology = context_client.GetTopology(interdomain_topology_id)
+    context_uuid = DEFAULT_CONTEXT_UUID
+    topology_uuid = INTERDOMAIN_TOPOLOGY_UUID
+    interdomain_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid)
+    if interdomain_topology is None:
+        MSG = '[get_interdomain_device_uuids] {:s}/{:s} topology not found'
+        LOGGER.warning(MSG.format(context_uuid, topology_uuid))
+        return set()
 
     # add abstracted devices in the interdomain topology
     interdomain_device_ids = interdomain_topology.device_ids
diff --git a/src/common/tools/context_queries/Service.py b/src/common/tools/context_queries/Service.py
new file mode 100644
index 0000000000000000000000000000000000000000..15b201e731760068457683d9e30f79ab12d231d7
--- /dev/null
+++ b/src/common/tools/context_queries/Service.py
@@ -0,0 +1,39 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from typing import Optional
+from common.Constants import DEFAULT_CONTEXT_UUID
+from common.proto.context_pb2 import Service, ServiceId
+from context.client.ContextClient import ContextClient
+
+LOGGER = logging.getLogger(__name__)
+
+def get_service(
+        context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID,
+        rw_copy : bool = False
+    ) -> Optional[Service]:
+    try:
+        # pylint: disable=no-member
+        service_id = ServiceId()
+        service_id.context_id.context_uuid.uuid = context_uuid
+        service_id.service_uuid.uuid = service_uuid
+        ro_service = context_client.GetService(service_id)
+        if not rw_copy: return ro_service
+        rw_service = Service()
+        rw_service.CopyFrom(ro_service)
+        return rw_service
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get service({:s} / {:s})'.format(str(context_uuid), str(service_uuid)))
+        return None
diff --git a/src/common/tools/context_queries/Slice.py b/src/common/tools/context_queries/Slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f884aa94990c28ad786b3243aed948ddc7f9f34
--- /dev/null
+++ b/src/common/tools/context_queries/Slice.py
@@ -0,0 +1,39 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from typing import Optional
+from common.Constants import DEFAULT_CONTEXT_UUID
+from common.proto.context_pb2 import Slice, SliceId
+from context.client.ContextClient import ContextClient
+
+LOGGER = logging.getLogger(__name__)
+
+def get_slice(
+        context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID,
+        rw_copy : bool = False
+    ) -> Optional[Slice]:
+    try:
+        # pylint: disable=no-member
+        slice_id = SliceId()
+        slice_id.context_id.context_uuid.uuid = context_uuid
+        slice_id.slice_uuid.uuid = slice_uuid
+        ro_slice = context_client.GetSlice(slice_id)
+        if not rw_copy: return ro_slice
+        rw_slice = Slice()
+        rw_slice.CopyFrom(ro_slice)
+        return rw_slice
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get slice({:s} / {:s})'.format(str(context_uuid), str(slice_uuid)))
+        return None
diff --git a/src/common/tools/context_queries/Topology.py b/src/common/tools/context_queries/Topology.py
index fcf1b96bb51571a71ab35fb743f8154f02e2d200..3d2077e965efb3e78ad9febbe54b4f0aaea5aef6 100644
--- a/src/common/tools/context_queries/Topology.py
+++ b/src/common/tools/context_queries/Topology.py
@@ -12,12 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List
-from common.proto.context_pb2 import ContextId, Topology
+import grpc, logging
+from typing import List, Optional
+from common.Constants import DEFAULT_CONTEXT_UUID
+from common.proto.context_pb2 import ContextId, Topology, TopologyId
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology
 from context.client.ContextClient import ContextClient
 
+LOGGER = logging.getLogger(__name__)
+
 def create_topology(
     context_client : ContextClient, context_uuid : str, topology_uuid : str
 ) -> None:
@@ -39,3 +43,21 @@ def create_missing_topologies(
         if topology_uuid in existing_topology_uuids: continue
         grpc_topology = Topology(**json_topology(topology_uuid, context_id=context_id))
         context_client.SetTopology(grpc_topology)
+
+def get_topology(
+        context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID,
+        rw_copy : bool = False
+    ) -> Optional[Topology]:
+    try:
+        # pylint: disable=no-member
+        topology_id = TopologyId()
+        topology_id.context_id.context_uuid.uuid = context_uuid
+        topology_id.topology_uuid.uuid = topology_uuid
+        ro_topology = context_client.GetTopology(topology_id)
+        if not rw_copy: return ro_topology
+        rw_topology = Topology()
+        rw_topology.CopyFrom(ro_topology)
+        return rw_topology
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get topology({:s} / {:s})'.format(str(context_uuid), str(topology_uuid)))
+        return None
diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..f14e2caf6065996ea6223449f309e03d141b5954
--- /dev/null
+++ b/src/common/tools/descriptor/Loader.py
@@ -0,0 +1,254 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# SDN controller descriptor loader
+
+# Usage example (WebUI):
+#    descriptors = json.loads(descriptors_data_from_client)
+#    descriptor_loader = DescriptorLoader(descriptors)
+#    results = descriptor_loader.process()
+#    for message,level in compose_notifications(results):
+#        flash(message, level)
+
+# Usage example (pytest):
+#    with open('path/to/descriptor.json', 'r', encoding='UTF-8') as f:
+#        descriptors = json.loads(f.read())
+#    descriptor_loader = DescriptorLoader(
+#       descriptors, context_client=..., device_client=..., service_client=..., slice_client=...)
+#    results = descriptor_loader.process()
+#    loggers = {'success': LOGGER.info, 'danger': LOGGER.error, 'error': LOGGER.error}
+#    for message,level in compose_notifications(results):
+#        loggers.get(level)(message)
+
+import json
+from typing import Dict, List, Optional, Tuple, Union
+from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Slice, Topology
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from slice.client.SliceClient import SliceClient
+from .Tools import (
+    format_device_custom_config_rules, format_service_custom_config_rules, format_slice_custom_config_rules,
+    get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices,
+    get_descriptors_add_topologies, split_devices_by_rules)
+
+ENTITY_TO_TEXT = {
+    # name   => singular,    plural
+    'context'   : ('Context',    'Contexts'   ),
+    'topology'  : ('Topology',   'Topologies' ),
+    'device'    : ('Device',     'Devices'    ),
+    'link'      : ('Link',       'Links'      ),
+    'service'   : ('Service',    'Services'   ),
+    'slice'     : ('Slice',      'Slices'     ),
+    'connection': ('Connection', 'Connections'),
+}
+
+ACTION_TO_TEXT = {
+    # action =>  infinitive,  past
+    'add'     : ('Add',       'Added'),
+    'update'  : ('Update',    'Updated'),
+    'config'  : ('Configure', 'Configured'),
+}
+
+TypeResults = List[Tuple[str, str, int, List[str]]] # entity_name, action, num_ok, list[error]
+TypeNotification = Tuple[str, str] # message, level
+TypeNotificationList = List[TypeNotification]
+
+def compose_notifications(results : TypeResults) -> TypeNotificationList:
+    notifications = []
+    for entity_name, action_name, num_ok, error_list in results:
+        entity_name_singular,entity_name_plural = ENTITY_TO_TEXT[entity_name]
+        action_infinitive, action_past = ACTION_TO_TEXT[action_name]
+        num_err = len(error_list)
+        for error in error_list:
+            notifications.append((f'Unable to {action_infinitive} {entity_name_singular} {error}', 'error'))
+        if num_ok : notifications.append((f'{str(num_ok)} {entity_name_plural} {action_past}', 'success'))
+        if num_err: notifications.append((f'{str(num_err)} {entity_name_plural} failed', 'danger'))
+    return notifications
+
+class DescriptorLoader:
+    def __init__(
+        self, descriptors : Union[str, Dict],
+        context_client : Optional[ContextClient] = None, device_client : Optional[DeviceClient] = None,
+        service_client : Optional[ServiceClient] = None, slice_client : Optional[SliceClient] = None
+    ) -> None:
+        self.__descriptors = json.loads(descriptors) if isinstance(descriptors, str) else descriptors
+        self.__dummy_mode  = self.__descriptors.get('dummy_mode' , False)
+        self.__contexts    = self.__descriptors.get('contexts'   , [])
+        self.__topologies  = self.__descriptors.get('topologies' , [])
+        self.__devices     = self.__descriptors.get('devices'    , [])
+        self.__links       = self.__descriptors.get('links'      , [])
+        self.__services    = self.__descriptors.get('services'   , [])
+        self.__slices      = self.__descriptors.get('slices'     , [])
+        self.__connections = self.__descriptors.get('connections', [])
+
+        self.__contexts_add   = None
+        self.__topologies_add = None
+        self.__devices_add    = None
+        self.__devices_config = None
+        self.__services_add   = None
+        self.__slices_add     = None
+
+        self.__ctx_cli = ContextClient() if context_client is None else context_client
+        self.__dev_cli = DeviceClient()  if device_client  is None else device_client
+        self.__svc_cli = ServiceClient() if service_client is None else service_client
+        self.__slc_cli = SliceClient()   if slice_client   is None else slice_client
+
+        self.__results : TypeResults = list()
+
+    @property
+    def contexts(self) -> List[Dict]: return self.__contexts
+
+    @property
+    def num_contexts(self) -> int: return len(self.__contexts)
+
+    @property
+    def topologies(self) -> Dict[str, List[Dict]]:
+        _topologies = {}
+        for topology in self.__topologies:
+            context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid']
+            _topologies.setdefault(context_uuid, []).append(topology)
+        return _topologies
+
+    @property
+    def num_topologies(self) -> Dict[str, int]:
+        _num_topologies = {}
+        for topology in self.__topologies:
+            context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid']
+            _num_topologies[context_uuid] = _num_topologies.get(context_uuid, 0) + 1
+        return _num_topologies
+
+    @property
+    def devices(self) -> List[Dict]: return self.__devices
+
+    @property
+    def num_devices(self) -> int: return len(self.__devices)
+
+    @property
+    def links(self) -> List[Dict]: return self.__links
+
+    @property
+    def num_links(self) -> int: return len(self.__links)
+
+    @property
+    def services(self) -> Dict[str, List[Dict]]:
+        _services = {}
+        for service in self.__services:
+            context_uuid = service['service_id']['context_id']['context_uuid']['uuid']
+            _services.setdefault(context_uuid, []).append(service)
+        return _services
+
+    @property
+    def num_services(self) -> Dict[str, int]:
+        _num_services = {}
+        for service in self.__services:
+            context_uuid = service['service_id']['context_id']['context_uuid']['uuid']
+            _num_services[context_uuid] = _num_services.get(context_uuid, 0) + 1
+        return _num_services
+
+    @property
+    def slices(self) -> Dict[str, List[Dict]]:
+        _slices = {}
+        for slice_ in self.__slices:
+            context_uuid = slice_['slice_id']['context_id']['context_uuid']['uuid']
+            _slices.setdefault(context_uuid, []).append(slice_)
+        return _slices
+
+    @property
+    def num_slices(self) -> Dict[str, int]:
+        _num_slices = {}
+        for slice_ in self.__slices:
+            context_uuid = slice_['slice_id']['context_id']['context_uuid']['uuid']
+            _num_slices[context_uuid] = _num_slices.get(context_uuid, 0) + 1
+        return _num_slices
+
+    @property
+    def connections(self) -> List[Dict]: return self.__connections
+
+    @property
+    def num_connections(self) -> int: return len(self.__connections)
+
+    def process(self) -> TypeResults:
+        # Format CustomConfigRules in Devices, Services and Slices provided in JSON format
+        self.__devices  = [format_device_custom_config_rules (device ) for device  in self.__devices ]
+        self.__services = [format_service_custom_config_rules(service) for service in self.__services]
+        self.__slices   = [format_slice_custom_config_rules  (slice_ ) for slice_  in self.__slices  ]
+
+        # Context and Topology require to create the entity first, and add devices, links, services,
+        # slices, etc. in a second stage.
+        self.__contexts_add = get_descriptors_add_contexts(self.__contexts)
+        self.__topologies_add = get_descriptors_add_topologies(self.__topologies)
+
+        if self.__dummy_mode:
+            self._dummy_mode()
+        else:
+            self._normal_mode()
+        
+        return self.__results
+
+    def _dummy_mode(self) -> None:
+        # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
+        self.__ctx_cli.connect()
+        self._process_descr('context',    'add',    self.__ctx_cli.SetContext,    Context,    self.__contexts_add  )
+        self._process_descr('topology',   'add',    self.__ctx_cli.SetTopology,   Topology,   self.__topologies_add)
+        self._process_descr('device',     'add',    self.__ctx_cli.SetDevice,     Device,     self.__devices       )
+        self._process_descr('link',       'add',    self.__ctx_cli.SetLink,       Link,       self.__links         )
+        self._process_descr('service',    'add',    self.__ctx_cli.SetService,    Service,    self.__services      )
+        self._process_descr('slice',      'add',    self.__ctx_cli.SetSlice,      Slice,      self.__slices        )
+        self._process_descr('connection', 'add',    self.__ctx_cli.SetConnection, Connection, self.__connections   )
+        self._process_descr('context',    'update', self.__ctx_cli.SetContext,    Context,    self.__contexts      )
+        self._process_descr('topology',   'update', self.__ctx_cli.SetTopology,   Topology,   self.__topologies    )
+        #self.__ctx_cli.close()
+
+    def _normal_mode(self) -> None:
+        # Normal mode: follows the automated workflows in the different components
+        assert len(self.__connections) == 0, 'in normal mode, connections should not be set'
+
+        # Device, Service and Slice require to first create the entity and the configure it
+        self.__devices_add, self.__devices_config = split_devices_by_rules(self.__devices)
+        self.__services_add = get_descriptors_add_services(self.__services)
+        self.__slices_add = get_descriptors_add_slices(self.__slices)
+
+        self.__ctx_cli.connect()
+        self.__dev_cli.connect()
+        self.__svc_cli.connect()
+        self.__slc_cli.connect()
+
+        self._process_descr('context',  'add',    self.__ctx_cli.SetContext,      Context,  self.__contexts_add  )
+        self._process_descr('topology', 'add',    self.__ctx_cli.SetTopology,     Topology, self.__topologies_add)
+        self._process_descr('device',   'add',    self.__dev_cli.AddDevice,       Device,   self.__devices_add   )
+        self._process_descr('device',   'config', self.__dev_cli.ConfigureDevice, Device,   self.__devices_config)
+        self._process_descr('link',     'add',    self.__ctx_cli.SetLink,         Link,     self.__links         )
+        self._process_descr('service',  'add',    self.__svc_cli.CreateService,   Service,  self.__services_add  )
+        self._process_descr('service',  'update', self.__svc_cli.UpdateService,   Service,  self.__services      )
+        self._process_descr('slice',    'add',    self.__slc_cli.CreateSlice,     Slice,    self.__slices_add    )
+        self._process_descr('slice',    'update', self.__slc_cli.UpdateSlice,     Slice,    self.__slices        )
+        self._process_descr('context',  'update', self.__ctx_cli.SetContext,      Context,  self.__contexts      )
+        self._process_descr('topology', 'update', self.__ctx_cli.SetTopology,     Topology, self.__topologies    )
+
+        #self.__slc_cli.close()
+        #self.__svc_cli.close()
+        #self.__dev_cli.close()
+        #self.__ctx_cli.close()
+
+    def _process_descr(self, entity_name, action_name, grpc_method, grpc_class, entities) -> None:
+        num_ok, error_list = 0, []
+        for entity in entities:
+            try:
+                grpc_method(grpc_class(**entity))
+                num_ok += 1
+            except Exception as e: # pylint: disable=broad-except
+                error_list.append(f'{str(entity)}: {str(e)}')
+                # failure count is derived later via len(error_list); a separate counter is unneeded
+        self.__results.append((entity_name, action_name, num_ok, error_list))
diff --git a/src/webui/service/main/DescriptorTools.py b/src/common/tools/descriptor/Tools.py
similarity index 79%
rename from src/webui/service/main/DescriptorTools.py
rename to src/common/tools/descriptor/Tools.py
index 094be2f7d0cfd69ddb5cddc2238e8cec64c75daa..909cec9d97b5baa2f7b0198091c3921a71c9b1f7 100644
--- a/src/webui/service/main/DescriptorTools.py
+++ b/src/common/tools/descriptor/Tools.py
@@ -41,8 +41,8 @@ def get_descriptors_add_services(services : List[Dict]) -> List[Dict]:
 
 def get_descriptors_add_slices(slices : List[Dict]) -> List[Dict]:
     slices_add = []
-    for slice in slices:
-        slice_copy = copy.deepcopy(slice)
+    for slice_ in slices:
+        slice_copy = copy.deepcopy(slice_)
         slice_copy['slice_endpoint_ids'] = []
         slice_copy['slice_constraints'] = []
         slice_copy['slice_config'] = {'config_rules': []}
@@ -59,6 +59,24 @@ def format_custom_config_rules(config_rules : List[Dict]) -> List[Dict]:
             config_rule['custom']['resource_value'] = custom_resource_value
     return config_rules
 
+def format_device_custom_config_rules(device : Dict) -> Dict:
+    config_rules = device.get('device_config', {}).get('config_rules', [])
+    config_rules = format_custom_config_rules(config_rules)
+    device['device_config']['config_rules'] = config_rules
+    return device
+
+def format_service_custom_config_rules(service : Dict) -> Dict:
+    config_rules = service.get('service_config', {}).get('config_rules', [])
+    config_rules = format_custom_config_rules(config_rules)
+    service['service_config']['config_rules'] = config_rules
+    return service
+
+def format_slice_custom_config_rules(slice_ : Dict) -> Dict:
+    config_rules = slice_.get('slice_config', {}).get('config_rules', [])
+    config_rules = format_custom_config_rules(config_rules)
+    slice_['slice_config']['config_rules'] = config_rules
+    return slice_
+
 def split_devices_by_rules(devices : List[Dict]) -> Tuple[List[Dict], List[Dict]]:
     devices_add = []
     devices_config = []
diff --git a/src/compute/tests/mock_osm/__init__.py b/src/common/tools/descriptor/__init__.py
similarity index 100%
rename from src/compute/tests/mock_osm/__init__.py
rename to src/common/tools/descriptor/__init__.py
diff --git a/src/tests/ofc22/tests/BuildDescriptors.py b/src/common/tools/descriptor/old/BuildDescriptors.py
similarity index 100%
rename from src/tests/ofc22/tests/BuildDescriptors.py
rename to src/common/tools/descriptor/old/BuildDescriptors.py
diff --git a/src/tests/ofc22/tests/LoadDescriptors.py b/src/common/tools/descriptor/old/LoadDescriptors.py
similarity index 100%
rename from src/tests/ofc22/tests/LoadDescriptors.py
rename to src/common/tools/descriptor/old/LoadDescriptors.py
index 33bc699af933601e4c6d4b8dbc7b0c51206241ef..f0b19196afbcd67c1f20263791d20820489b9cf5 100644
--- a/src/tests/ofc22/tests/LoadDescriptors.py
+++ b/src/common/tools/descriptor/old/LoadDescriptors.py
@@ -14,8 +14,8 @@
 
 import json, logging, sys
 from common.Settings import get_setting
-from context.client.ContextClient import ContextClient
 from common.proto.context_pb2 import Context, Device, Link, Topology
+from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 
 LOGGER = logging.getLogger(__name__)
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
index 7e050289f19b93dc710185c2b29b326bbfd156d2..e3d12088147a59c3fd9e0179d3a3d957483fcc22 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
@@ -17,10 +17,10 @@ from flask import request
 from flask.json import jsonify
 from flask_restful import Resource
 from common.proto.context_pb2 import SliceStatusEnum
+from common.tools.context_queries.Slice import get_slice
 from context.client.ContextClient import ContextClient
 from slice.client.SliceClient import SliceClient
 from .tools.Authentication import HTTP_AUTH
-from .tools.ContextMethods import get_slice
 from .tools.HttpStatusCodes import HTTP_GATEWAYTIMEOUT, HTTP_NOCONTENT, HTTP_OK, HTTP_SERVERERROR
 
 LOGGER = logging.getLogger(__name__)
@@ -34,7 +34,7 @@ class L2VPN_Service(Resource):
         try:
             context_client = ContextClient()
 
-            target = get_slice(context_client, vpn_id)
+            target = get_slice(context_client, vpn_id, rw_copy=True)
             if target is None:
                 raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
 
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index 8aa410e9a2136f8f0c6df94a9d17ea376fcfc516..819d8995da6ffc3a7913c8781e4021ce83665e29 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -20,6 +20,7 @@ from flask.wrappers import Response
 from flask_restful import Resource
 from werkzeug.exceptions import UnsupportedMediaType
 from common.proto.context_pb2 import Slice
+from common.tools.context_queries.Slice import get_slice
 from common.tools.grpc.ConfigRules import update_config_rule_custom
 from common.tools.grpc.Constraints import (
     update_constraint_custom_dict, update_constraint_endpoint_location, update_constraint_endpoint_priority,
@@ -30,7 +31,6 @@ from context.client.ContextClient import ContextClient
 from slice.client.SliceClient import SliceClient
 from .schemas.site_network_access import SCHEMA_SITE_NETWORK_ACCESS
 from .tools.Authentication import HTTP_AUTH
-from .tools.ContextMethods import get_slice
 from .tools.HttpStatusCodes import HTTP_NOCONTENT, HTTP_SERVERERROR
 from .tools.Validator import validate_message
 from .Constants import (
@@ -69,7 +69,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s
         address_ip, address_prefix, remote_router, circuit_id
     ) = mapping
 
-    target = get_slice(context_client, vpn_id)
+    target = get_slice(context_client, vpn_id, rw_copy=True)
     if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
 
     endpoint_ids = target.slice_endpoint_ids        # pylint: disable=no-member
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py
deleted file mode 100644
index ac9e6fe4a5c138d00bc80fd953de2cc21d4677b5..0000000000000000000000000000000000000000
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import grpc, logging
-from typing import Optional
-from common.Constants import DEFAULT_CONTEXT_UUID
-from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId
-from context.client.ContextClient import ContextClient
-
-LOGGER = logging.getLogger(__name__)
-
-def get_service(
-        context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID
-    ) -> Optional[Service]:
-    try:
-        # pylint: disable=no-member
-        service_id = ServiceId()
-        service_id.context_id.context_uuid.uuid = context_uuid
-        service_id.service_uuid.uuid = service_uuid
-        service_readonly = context_client.GetService(service_id)
-        service = Service()
-        service.CopyFrom(service_readonly)
-        return service
-    except grpc.RpcError:
-        #LOGGER.exception('Unable to get service({:s} / {:s})'.format(str(context_uuid), str(service_uuid)))
-        return None
-
-def get_slice(
-        context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID
-    ) -> Optional[Slice]:
-    try:
-        # pylint: disable=no-member
-        slice_id = SliceId()
-        slice_id.context_id.context_uuid.uuid = context_uuid
-        slice_id.slice_uuid.uuid = slice_uuid
-        slice_readonly = context_client.GetSlice(slice_id)
-        slice_ = Slice()
-        slice_.CopyFrom(slice_readonly)
-        return slice_
-    except grpc.RpcError:
-        #LOGGER.exception('Unable to get slice({:s} / {:s})'.format(str(context_uuid), str(slice_uuid)))
-        return None
diff --git a/src/compute/tests/PrepareTestScenario.py b/src/compute/tests/PrepareTestScenario.py
index d534a4a28280c80964096a9cb7291c498ebe6b93..06fb34f9ee7508f4bd6fa769da78c50eb78c3bb8 100644
--- a/src/compute/tests/PrepareTestScenario.py
+++ b/src/compute/tests/PrepareTestScenario.py
@@ -19,7 +19,7 @@ from common.Settings import (
 from compute.service.rest_server.RestServer import RestServer
 from compute.service.rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn
 from compute.tests.MockService_Dependencies import MockService_Dependencies
-from .mock_osm.MockOSM import MockOSM
+from tests.tools.mock_osm.MockOSM import MockOSM
 from .Constants import WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD
 
 LOCAL_HOST = '127.0.0.1'
diff --git a/src/compute/tests/test_unitary.py b/src/compute/tests/test_unitary.py
index 05c45c1b3554d21084a4a20cac6856b049fe7ca3..acef6d4a68cb1e89df2fa567d437412c8805b35f 100644
--- a/src/compute/tests/test_unitary.py
+++ b/src/compute/tests/test_unitary.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging
-from .mock_osm.MockOSM import MockOSM
+from tests.tools.mock_osm.MockOSM import MockOSM
 from .Constants import SERVICE_CONNECTION_POINTS_1, SERVICE_CONNECTION_POINTS_2, SERVICE_TYPE
 from .PrepareTestScenario import ( # pylint: disable=unused-import
     # be careful, order of symbols is important here!
diff --git a/src/tests/Fixtures.py b/src/tests/Fixtures.py
index aeead8448651b386f4c69d12c139b6043fe5ef55..25b73e1de143b8c60d9a726ddf2bd3cea97d17a5 100644
--- a/src/tests/Fixtures.py
+++ b/src/tests/Fixtures.py
@@ -13,8 +13,6 @@
 # limitations under the License.
 
 import pytest
-from common.Settings import get_setting
-from compute.tests.mock_osm.MockOSM import MockOSM
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from monitoring.client.MonitoringClient import MonitoringClient
diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh
index 8afd683843d4882e75c3cbca8363aa3d63edda7f..ffd91da35186fe21f418950493ef797a9af1b522 100644
--- a/src/tests/ofc22/deploy_specs.sh
+++ b/src/tests/ofc22/deploy_specs.sh
@@ -2,6 +2,11 @@
 export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
+# Supported components are:
+#   context device automation policy service compute monitoring webui
+#   interdomain slice pathcomp dlt
+#   dbscanserving opticalattackmitigator opticalattackdetector
+#   l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
 export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
 
 # Set the tag you want to use for your images.
@@ -13,5 +18,9 @@ export TFS_K8S_NAMESPACE="tfs"
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 
-# Set the neew Grafana admin password
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
+
+# If not already set, disable skip-build flag.
+# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
diff --git a/src/tests/ofc22/descriptors_emulated.json b/src/tests/ofc22/descriptors_emulated.json
index 83f9c39e2ac7154b088ccdd0a1519ea32c1aee1d..a71d454f41f324cabb48a023d6d840a59245800c 100644
--- a/src/tests/ofc22/descriptors_emulated.json
+++ b/src/tests/ofc22/descriptors_emulated.json
@@ -9,70 +9,83 @@
     "topologies": [
         {
             "topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}},
-            "device_ids": [],
-            "link_ids": []
+            "device_ids": [
+                {"device_uuid": {"uuid": "R1-EMU"}},
+                {"device_uuid": {"uuid": "R2-EMU"}},
+                {"device_uuid": {"uuid": "R3-EMU"}},
+                {"device_uuid": {"uuid": "R4-EMU"}},
+                {"device_uuid": {"uuid": "O1-OLS"}}                
+            ],
+            "link_ids": [
+                {"link_uuid": {"uuid": "R1-EMU/13/0/0==O1-OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}},
+                {"link_uuid": {"uuid": "R2-EMU/13/0/0==O1-OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}},
+                {"link_uuid": {"uuid": "R3-EMU/13/0/0==O1-OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}},
+                {"link_uuid": {"uuid": "R4-EMU/13/0/0==O1-OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
+            ]
         }
     ],
     "devices": [
         {
-            "device_id": {"device_uuid": {"uuid": "R1-EMU"}},
-            "device_type": "emu-packet-router",
+            "device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}}
-            ]},
-            "device_operational_status": 1,
-            "device_drivers": [0],
-            "device_endpoints": []
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "13/0/0", "type": "optical", "sample_types": []},
+                    {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
+                ]}}}
+            ]}
         },
         {
-            "device_id": {"device_uuid": {"uuid": "R2-EMU"}},
-            "device_type": "emu-packet-router",
+            "device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}}
-            ]},
-            "device_operational_status": 1,
-            "device_drivers": [0],
-            "device_endpoints": []
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "13/0/0", "type": "optical", "sample_types": []},
+                    {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
+                ]}}}
+            ]}
         },
         {
-            "device_id": {"device_uuid": {"uuid": "R3-EMU"}},
-            "device_type": "emu-packet-router",
+            "device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}}
-            ]},
-            "device_operational_status": 1,
-            "device_drivers": [0],
-            "device_endpoints": []
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "13/0/0", "type": "optical", "sample_types": []},
+                    {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
+                ]}}}
+            ]}
         },
         {
-            "device_id": {"device_uuid": {"uuid": "R4-EMU"}},
-            "device_type": "emu-packet-router",
+            "device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}}
-            ]},
-            "device_operational_status": 1,
-            "device_drivers": [0],
-            "device_endpoints": []
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "13/0/0", "type": "optical", "sample_types": []},
+                    {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
+                ]}}}
+            ]}
         },
         {
-            "device_id": {"device_uuid": {"uuid": "O1-OLS"}},
-            "device_type": "emu-open-line-system",
+            "device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "device_type": "emu-open-line-system",
+            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"aade6001-f00b-5e2f-a357-6a0a9d3de870\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"eb287d83-f05e-53ec-ab5a-adf6bd2b5418\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"0ef74f99-1acc-57bd-ab9d-4b958b06c513\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"50296d99-58cc-5ce7-82f5-fc8ee4eec2ec\"}]}"}}
-            ]},
-            "device_operational_status": 1,
-            "device_drivers": [0],
-            "device_endpoints": []
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870", "type": "optical", "sample_types": []},
+                    {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418", "type": "optical", "sample_types": []},
+                    {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513", "type": "optical", "sample_types": []},
+                    {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec", "type": "optical", "sample_types": []}
+                ]}}}
+            ]}
         }
     ],
     "links": [
diff --git a/src/tests/ofc22/run_test_01_bootstrap.sh b/src/tests/ofc22/run_test_01_bootstrap.sh
index bb740707321b24fc960299f2eac91cc2d9775b64..61b49b251f927ffb2e845f0c9094d30ea597abc6 100755
--- a/src/tests/ofc22/run_test_01_bootstrap.sh
+++ b/src/tests/ofc22/run_test_01_bootstrap.sh
@@ -13,9 +13,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# make sure to source the following scripts:
-# - my_deploy.sh
-# - tfs_runtime_env_vars.sh
-
 source tfs_runtime_env_vars.sh
-pytest --verbose src/tests/ofc22/tests/test_functional_bootstrap.py
+pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_bootstrap.py
diff --git a/src/tests/ofc22/run_test_02_create_service.sh b/src/tests/ofc22/run_test_02_create_service.sh
index 8b6c8658df759bdcb777f83c6c7846d0ea7b48ed..135a3f74fe93d0d7a4da6ef0e02371a040fc1eb3 100755
--- a/src/tests/ofc22/run_test_02_create_service.sh
+++ b/src/tests/ofc22/run_test_02_create_service.sh
@@ -14,4 +14,4 @@
 # limitations under the License.
 
 source tfs_runtime_env_vars.sh
-pytest --verbose src/tests/ofc22/tests/test_functional_create_service.py
+pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_create_service.py
diff --git a/src/tests/ofc22/run_test_03_delete_service.sh b/src/tests/ofc22/run_test_03_delete_service.sh
index 51df41aee216e141b0d2e2f55a0398ecd9cdf35f..cbe6714fe91cf1758f62e697e667568d35578181 100755
--- a/src/tests/ofc22/run_test_03_delete_service.sh
+++ b/src/tests/ofc22/run_test_03_delete_service.sh
@@ -14,4 +14,4 @@
 # limitations under the License.
 
 source tfs_runtime_env_vars.sh
-pytest --verbose src/tests/ofc22/tests/test_functional_delete_service.py
+pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_delete_service.py
diff --git a/src/tests/ofc22/run_test_04_cleanup.sh b/src/tests/ofc22/run_test_04_cleanup.sh
index 2ba91684f9eb49075dd68877e54976f989811ae9..e88ddbd3227b3f29dfc7f126d5853e0b1d0e06f1 100755
--- a/src/tests/ofc22/run_test_04_cleanup.sh
+++ b/src/tests/ofc22/run_test_04_cleanup.sh
@@ -14,4 +14,4 @@
 # limitations under the License.
 
 source tfs_runtime_env_vars.sh
-pytest --verbose src/tests/ofc22/tests/test_functional_cleanup.py
+pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_cleanup.py
diff --git a/src/tests/ofc22/run_tests_and_coverage.sh b/src/tests/ofc22/run_tests.sh
similarity index 77%
rename from src/tests/ofc22/run_tests_and_coverage.sh
rename to src/tests/ofc22/run_tests.sh
index ae956925a430e0eab167bf36a49be59014a2a97b..0ad4be313987b8b5069808873f94840521d4284e 100755
--- a/src/tests/ofc22/run_tests_and_coverage.sh
+++ b/src/tests/ofc22/run_tests.sh
@@ -16,7 +16,6 @@
 
 PROJECTDIR=`pwd`
 
-# cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 COVERAGEFILE=$PROJECTDIR/coverage/.coverage
 
@@ -31,15 +30,15 @@ source tfs_runtime_env_vars.sh
 # Force a flush of Context database
 kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL
 
-# Run functional tests and analyze code coverage at the same time
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+# Run functional tests
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_bootstrap.py
 
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_create_service.py
 
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_delete_service.py
 
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_cleanup.py
diff --git a/src/tests/ofc22/setup_test_env.sh b/src/tests/ofc22/setup_test_env.sh
deleted file mode 100755
index 1f8b0a5a7a8dc986715c6f54a62151f6afa4ad80..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/setup_test_env.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get service/contextservice --namespace tfs  --template '{{.spec.clusterIP}}')
-export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service/contextservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="grpc")].port}')
-export COMPUTESERVICE_SERVICE_HOST=$(kubectl get service/computeservice --namespace tfs  --template '{{.spec.clusterIP}}')
-export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service/computeservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="http")].port}')
-echo "CONTEXTSERVICE_SERVICE_HOST=$CONTEXTSERVICE_SERVICE_HOST"
-echo "CONTEXTSERVICE_SERVICE_PORT_GRPC=$CONTEXTSERVICE_SERVICE_PORT_GRPC"
-echo "COMPUTESERVICE_SERVICE_HOST=$COMPUTESERVICE_SERVICE_HOST"
-echo "COMPUTESERVICE_SERVICE_PORT_HTTP=$COMPUTESERVICE_SERVICE_PORT_HTTP"
diff --git a/src/tests/ofc22/tests/Fixtures.py b/src/tests/ofc22/tests/Fixtures.py
index 370731e5de14b2c7c4acdcfa86eacfa66f2ffd4b..3b35a12e299ba776e909fbdd2739e971431083a6 100644
--- a/src/tests/ofc22/tests/Fixtures.py
+++ b/src/tests/ofc22/tests/Fixtures.py
@@ -12,14 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
+import pytest, logging
 from common.Settings import get_setting
-from compute.tests.mock_osm.MockOSM import MockOSM
-from .Objects import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME
+from tests.tools.mock_osm.Constants import WIM_PASSWORD, WIM_USERNAME
+from tests.tools.mock_osm.MockOSM import MockOSM
+from .Objects import WIM_MAPPING
 
+LOGGER = logging.getLogger(__name__)
 
 @pytest.fixture(scope='session')
 def osm_wim():
     wim_url = 'http://{:s}:{:s}'.format(
         get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP')))
+    LOGGER.info('WIM_MAPPING = {:s}'.format(str(WIM_MAPPING)))
     return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
diff --git a/src/tests/ofc22/tests/Objects.py b/src/tests/ofc22/tests/Objects.py
index d2fb32ebb20b7bcdda9ac12b7a7390c46e6fb1d1..7bfbe9fce558d6a86d965ecb6421369d7f544d4d 100644
--- a/src/tests/ofc22/tests/Objects.py
+++ b/src/tests/ofc22/tests/Objects.py
@@ -12,220 +12,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, List, Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
-from common.tools.object_factory.Context import json_context, json_context_id
-from common.tools.object_factory.Device import (
-    json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled,
-    json_device_emulated_tapi_disabled, json_device_id, json_device_packetrouter_disabled, json_device_tapi_disabled)
-from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
-from common.tools.object_factory.Link import json_link, json_link_id
-from common.tools.object_factory.Topology import json_topology, json_topology_id
-from common.proto.kpi_sample_types_pb2 import KpiSampleType
-
-# ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
-
-# ----- Topology -------------------------------------------------------------------------------------------------------
-TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-
-# ----- Monitoring Samples ---------------------------------------------------------------------------------------------
-PACKET_PORT_SAMPLE_TYPES = [
-    KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED,
-    KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED,
-    KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED,
-    KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED,
-]
-
-# ----- Device Credentials and Settings --------------------------------------------------------------------------------
-try:
-    from .Credentials import DEVICE_R1_ADDRESS, DEVICE_R1_PORT, DEVICE_R1_USERNAME, DEVICE_R1_PASSWORD
-    from .Credentials import DEVICE_R3_ADDRESS, DEVICE_R3_PORT, DEVICE_R3_USERNAME, DEVICE_R3_PASSWORD
-    from .Credentials import DEVICE_O1_ADDRESS, DEVICE_O1_PORT
-    USE_REAL_DEVICES = True     # Use real devices
-except ImportError:
-    USE_REAL_DEVICES = False    # Use emulated devices
-
-    DEVICE_R1_ADDRESS  = '0.0.0.0'
-    DEVICE_R1_PORT     = 830
-    DEVICE_R1_USERNAME = 'admin'
-    DEVICE_R1_PASSWORD = 'admin'
-
-    DEVICE_R3_ADDRESS  = '0.0.0.0'
-    DEVICE_R3_PORT     = 830
-    DEVICE_R3_USERNAME = 'admin'
-    DEVICE_R3_PASSWORD = 'admin'
-
-    DEVICE_O1_ADDRESS  = '0.0.0.0'
-    DEVICE_O1_PORT     = 4900
-
-#USE_REAL_DEVICES = False     # Uncomment to force to use emulated devices
-
-def json_endpoint_ids(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]):
-    return [
-        json_endpoint_id(device_id, ep_uuid, topology_id=None)
-        for ep_uuid, _, _ in endpoint_descriptors
-    ]
-
-def json_endpoints(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]):
-    return [
-        json_endpoint(device_id, ep_uuid, ep_type, topology_id=None, kpi_sample_types=ep_sample_types)
-        for ep_uuid, ep_type, ep_sample_types in endpoint_descriptors
-    ]
-
-def get_link_uuid(a_device_id : Dict, a_endpoint_id : Dict, z_device_id : Dict, z_endpoint_id : Dict) -> str:
-    return '{:s}/{:s}=={:s}/{:s}'.format(
-        a_device_id['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'],
-        z_device_id['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid'])
-
-
-# ----- Devices --------------------------------------------------------------------------------------------------------
-if not USE_REAL_DEVICES:
-    json_device_packetrouter_disabled = json_device_emulated_packet_router_disabled
-    json_device_tapi_disabled         = json_device_emulated_tapi_disabled
-
-DEVICE_R1_UUID          = 'R1-EMU'
-DEVICE_R1_TIMEOUT       = 120
-DEVICE_R1_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)]
-DEVICE_R1_ID            = json_device_id(DEVICE_R1_UUID)
-#DEVICE_R1_ENDPOINTS     = json_endpoints(DEVICE_R1_ID, DEVICE_R1_ENDPOINT_DEFS)
-DEVICE_R1_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_R1_ID, DEVICE_R1_ENDPOINT_DEFS)
-DEVICE_R1               = json_device_packetrouter_disabled(DEVICE_R1_UUID)
-ENDPOINT_ID_R1_13_0_0   = DEVICE_R1_ENDPOINT_IDS[0]
-ENDPOINT_ID_R1_13_1_2   = DEVICE_R1_ENDPOINT_IDS[1]
-DEVICE_R1_CONNECT_RULES = json_device_connect_rules(DEVICE_R1_ADDRESS, DEVICE_R1_PORT, {
-    'username': DEVICE_R1_USERNAME,
-    'password': DEVICE_R1_PASSWORD,
-    'timeout' : DEVICE_R1_TIMEOUT,
-}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_R1_ENDPOINT_DEFS)
-
-
-DEVICE_R2_UUID          = 'R2-EMU'
-DEVICE_R2_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)]
-DEVICE_R2_ID            = json_device_id(DEVICE_R2_UUID)
-#DEVICE_R2_ENDPOINTS     = json_endpoints(DEVICE_R2_ID, DEVICE_R2_ENDPOINT_DEFS)
-DEVICE_R2_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_R2_ID, DEVICE_R2_ENDPOINT_DEFS)
-DEVICE_R2               = json_device_emulated_packet_router_disabled(DEVICE_R2_UUID)
-ENDPOINT_ID_R2_13_0_0   = DEVICE_R2_ENDPOINT_IDS[0]
-ENDPOINT_ID_R2_13_1_2   = DEVICE_R2_ENDPOINT_IDS[1]
-DEVICE_R2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R2_ENDPOINT_DEFS)
-
-
-DEVICE_R3_UUID          = 'R3-EMU'
-DEVICE_R3_TIMEOUT       = 120
-DEVICE_R3_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)]
-DEVICE_R3_ID            = json_device_id(DEVICE_R3_UUID)
-#DEVICE_R3_ENDPOINTS     = json_endpoints(DEVICE_R3_ID, DEVICE_R3_ENDPOINT_DEFS)
-DEVICE_R3_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_R3_ID, DEVICE_R3_ENDPOINT_DEFS)
-DEVICE_R3               = json_device_packetrouter_disabled(DEVICE_R3_UUID)
-ENDPOINT_ID_R3_13_0_0   = DEVICE_R3_ENDPOINT_IDS[0]
-ENDPOINT_ID_R3_13_1_2   = DEVICE_R3_ENDPOINT_IDS[1]
-DEVICE_R3_CONNECT_RULES = json_device_connect_rules(DEVICE_R3_ADDRESS, DEVICE_R3_PORT, {
-    'username': DEVICE_R3_USERNAME,
-    'password': DEVICE_R3_PASSWORD,
-    'timeout' : DEVICE_R3_TIMEOUT,
-}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_R3_ENDPOINT_DEFS)
-
-
-DEVICE_R4_UUID          = 'R4-EMU'
-DEVICE_R4_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)]
-DEVICE_R4_ID            = json_device_id(DEVICE_R4_UUID)
-#DEVICE_R4_ENDPOINTS     = json_endpoints(DEVICE_R4_ID, DEVICE_R4_ENDPOINT_DEFS)
-DEVICE_R4_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_R4_ID, DEVICE_R4_ENDPOINT_DEFS)
-DEVICE_R4               = json_device_emulated_packet_router_disabled(DEVICE_R4_UUID)
-ENDPOINT_ID_R4_13_0_0   = DEVICE_R4_ENDPOINT_IDS[0]
-ENDPOINT_ID_R4_13_1_2   = DEVICE_R4_ENDPOINT_IDS[1]
-DEVICE_R4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R4_ENDPOINT_DEFS)
-
-
-DEVICE_O1_UUID          = 'O1-OLS'
-DEVICE_O1_TIMEOUT       = 120
-DEVICE_O1_ENDPOINT_DEFS = [
-    ('aade6001-f00b-5e2f-a357-6a0a9d3de870', 'optical', []), # node_1_port_13
-    ('eb287d83-f05e-53ec-ab5a-adf6bd2b5418', 'optical', []), # node_2_port_13
-    ('0ef74f99-1acc-57bd-ab9d-4b958b06c513', 'optical', []), # node_3_port_13
-    ('50296d99-58cc-5ce7-82f5-fc8ee4eec2ec', 'optical', []), # node_4_port_13
-]
-DEVICE_O1_ID            = json_device_id(DEVICE_O1_UUID)
-DEVICE_O1               = json_device_tapi_disabled(DEVICE_O1_UUID)
-#DEVICE_O1_ENDPOINTS     = json_endpoints(DEVICE_O1_ID, DEVICE_O1_ENDPOINT_DEFS)
-DEVICE_O1_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_O1_ID, DEVICE_O1_ENDPOINT_DEFS)
-ENDPOINT_ID_O1_EP1      = DEVICE_O1_ENDPOINT_IDS[0]
-ENDPOINT_ID_O1_EP2      = DEVICE_O1_ENDPOINT_IDS[1]
-ENDPOINT_ID_O1_EP3      = DEVICE_O1_ENDPOINT_IDS[2]
-ENDPOINT_ID_O1_EP4      = DEVICE_O1_ENDPOINT_IDS[3]
-DEVICE_O1_CONNECT_RULES = json_device_connect_rules(DEVICE_O1_ADDRESS, DEVICE_O1_PORT, {
-    'timeout' : DEVICE_O1_TIMEOUT,
-}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_O1_ENDPOINT_DEFS)
-
-
-# ----- Links ----------------------------------------------------------------------------------------------------------
-LINK_R1_O1_UUID = get_link_uuid(DEVICE_R1_ID, ENDPOINT_ID_R1_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP1)
-LINK_R1_O1_ID   = json_link_id(LINK_R1_O1_UUID)
-LINK_R1_O1      = json_link(LINK_R1_O1_UUID, [ENDPOINT_ID_R1_13_0_0, ENDPOINT_ID_O1_EP1])
-
-LINK_R2_O1_UUID = get_link_uuid(DEVICE_R2_ID, ENDPOINT_ID_R2_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP2)
-LINK_R2_O1_ID   = json_link_id(LINK_R2_O1_UUID)
-LINK_R2_O1      = json_link(LINK_R2_O1_UUID, [ENDPOINT_ID_R2_13_0_0, ENDPOINT_ID_O1_EP2])
-
-LINK_R3_O1_UUID = get_link_uuid(DEVICE_R3_ID, ENDPOINT_ID_R3_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP3)
-LINK_R3_O1_ID   = json_link_id(LINK_R3_O1_UUID)
-LINK_R3_O1      = json_link(LINK_R3_O1_UUID, [ENDPOINT_ID_R3_13_0_0, ENDPOINT_ID_O1_EP3])
-
-LINK_R4_O1_UUID = get_link_uuid(DEVICE_R4_ID, ENDPOINT_ID_R4_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP4)
-LINK_R4_O1_ID   = json_link_id(LINK_R4_O1_UUID)
-LINK_R4_O1      = json_link(LINK_R4_O1_UUID, [ENDPOINT_ID_R4_13_0_0, ENDPOINT_ID_O1_EP4])
-
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.EndPoint import json_endpoint_id
+from tests.tools.mock_osm.Tools import connection_point, wim_mapping
 
 # ----- WIM Service Settings -------------------------------------------------------------------------------------------
 
-def compose_service_endpoint_id(endpoint_id):
-    device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
-    endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
-    return ':'.join([device_uuid, endpoint_uuid])
-
-WIM_SEP_R1_ID      = compose_service_endpoint_id(ENDPOINT_ID_R1_13_1_2)
-WIM_SEP_R1_SITE_ID = '1'
-WIM_SEP_R1_BEARER  = WIM_SEP_R1_ID
-WIM_SRV_R1_VLAN_ID = 400
+WIM_DC1_SITE_ID     = '1'
+WIM_DC1_DEVICE_ID   = json_device_id('R1-EMU')
+WIM_DC1_ENDPOINT_ID = json_endpoint_id(WIM_DC1_DEVICE_ID, '13/1/2')
 
-WIM_SEP_R3_ID      = compose_service_endpoint_id(ENDPOINT_ID_R3_13_1_2)
-WIM_SEP_R3_SITE_ID = '2'
-WIM_SEP_R3_BEARER  = WIM_SEP_R3_ID
-WIM_SRV_R3_VLAN_ID = 500
+WIM_DC2_SITE_ID     = '2'
+WIM_DC2_DEVICE_ID   = json_device_id('R3-EMU')
+WIM_DC2_ENDPOINT_ID = json_endpoint_id(WIM_DC2_DEVICE_ID, '13/1/2')
 
-WIM_USERNAME = 'admin'
-WIM_PASSWORD = 'admin'
+WIM_SEP_DC1, WIM_MAP_DC1 = wim_mapping(WIM_DC1_SITE_ID, WIM_DC1_ENDPOINT_ID)
+WIM_SEP_DC2, WIM_MAP_DC2 = wim_mapping(WIM_DC2_SITE_ID, WIM_DC2_ENDPOINT_ID)
+WIM_MAPPING  = [WIM_MAP_DC1, WIM_MAP_DC2]
 
-WIM_MAPPING  = [
-    {'device-id': DEVICE_R1_UUID, 'service_endpoint_id': WIM_SEP_R1_ID,
-     'service_mapping_info': {'bearer': {'bearer-reference': WIM_SEP_R1_BEARER}, 'site-id': WIM_SEP_R1_SITE_ID}},
-    {'device-id': DEVICE_R3_UUID, 'service_endpoint_id': WIM_SEP_R3_ID,
-     'service_mapping_info': {'bearer': {'bearer-reference': WIM_SEP_R3_BEARER}, 'site-id': WIM_SEP_R3_SITE_ID}},
-]
+WIM_SRV_VLAN_ID = 300
 WIM_SERVICE_TYPE = 'ELINE'
 WIM_SERVICE_CONNECTION_POINTS = [
-    {'service_endpoint_id': WIM_SEP_R1_ID,
-        'service_endpoint_encapsulation_type': 'dot1q',
-        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_R1_VLAN_ID}},
-    {'service_endpoint_id': WIM_SEP_R3_ID,
-        'service_endpoint_encapsulation_type': 'dot1q',
-        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_R3_VLAN_ID}},
+    connection_point(WIM_SEP_DC1, 'dot1q', WIM_SRV_VLAN_ID),
+    connection_point(WIM_SEP_DC2, 'dot1q', WIM_SRV_VLAN_ID),
 ]
-
-# ----- Object Collections ---------------------------------------------------------------------------------------------
-
-CONTEXTS = [CONTEXT]
-TOPOLOGIES = [TOPOLOGY]
-
-DEVICES = [
-    (DEVICE_R1, DEVICE_R1_CONNECT_RULES),
-    (DEVICE_R2, DEVICE_R2_CONNECT_RULES),
-    (DEVICE_R3, DEVICE_R3_CONNECT_RULES),
-    (DEVICE_R4, DEVICE_R4_CONNECT_RULES),
-    (DEVICE_O1, DEVICE_O1_CONNECT_RULES),
-]
-
-LINKS = [LINK_R1_O1, LINK_R2_O1, LINK_R3_O1, LINK_R4_O1]
\ No newline at end of file
diff --git a/src/tests/ofc22/tests/test_functional_bootstrap.py b/src/tests/ofc22/tests/test_functional_bootstrap.py
index 65b7cece1625032d8e02a5962d49d892e29d615a..71deb9d596b1494e148b140902ca927e5d664dd3 100644
--- a/src/tests/ofc22/tests/test_functional_bootstrap.py
+++ b/src/tests/ofc22/tests/test_functional_bootstrap.py
@@ -12,27 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy, logging, pytest
-from common.Settings import get_setting
+import logging, time
+from common.proto.context_pb2 import ContextId, Empty
 from common.proto.monitoring_pb2 import KpiDescriptorList
-from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
+from common.tests.LoadScenario import load_scenario_from_descriptor
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Link import json_link_id
-from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
-from monitoring.client.MonitoringClient import MonitoringClient
-from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology
 from device.client.DeviceClient import DeviceClient
-from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
-from tests.Fixtures import context_client, device_client, monitoring_client
+from monitoring.client.MonitoringClient import MonitoringClient
+from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
+DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
 
-def test_scenario_empty(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+def test_scenario_bootstrap(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,   # pylint: disable=redefined-outer-name
+) -> None:
     # ----- List entities - Ensure database is empty -------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
     assert len(response.contexts) == 0
@@ -44,160 +43,53 @@ def test_scenario_empty(context_client : ContextClient):  # pylint: disable=rede
     assert len(response.links) == 0
 
 
-def test_prepare_scenario(context_client : ContextClient):  # pylint: disable=redefined-outer-name
-
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client)
-    #events_collector.start()
-
-    #expected_events = []
-
-    # ----- Create Contexts and Topologies -----------------------------------------------------------------------------
-    for context in CONTEXTS:
-        context_uuid = context['context_id']['context_uuid']['uuid']
-        LOGGER.info('Adding Context {:s}'.format(context_uuid))
-        response = context_client.SetContext(Context(**context))
-        assert response.context_uuid.uuid == context_uuid
-        #expected_events.append(('ContextEvent', EVENT_CREATE, json_context_id(context_uuid)))
-
-    for topology in TOPOLOGIES:
-        context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid']
-        topology_uuid = topology['topology_id']['topology_uuid']['uuid']
-        LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
-        response = context_client.SetTopology(Topology(**topology))
-        assert response.context_id.context_uuid.uuid == context_uuid
-        assert response.topology_uuid.uuid == topology_uuid
-        context_id = json_context_id(context_uuid)
-        #expected_events.append(('TopologyEvent', EVENT_CREATE, json_topology_id(topology_uuid, context_id=context_id)))
+    # ----- Load Scenario ----------------------------------------------------------------------------------------------
+    descriptor_loader = load_scenario_from_descriptor(
+        DESCRIPTOR_FILE, context_client, device_client, None, None)
 
-    # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    #check_events(events_collector, expected_events)
 
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
-
-
-def test_scenario_ready(context_client : ContextClient):  # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
-
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == 0
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == 0
-
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
-
-
-def test_devices_bootstraping(
-    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
-
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client, log_events_received=True)
-    #events_collector.start()
-
-    #expected_events = []
-
-    # ----- Create Devices and Validate Collected Events ---------------------------------------------------------------
-    for device, connect_rules in DEVICES:
-        device_uuid = device['device_id']['device_uuid']['uuid']
-        LOGGER.info('Adding Device {:s}'.format(device_uuid))
-
-        device_with_connect_rules = copy.deepcopy(device)
-        device_with_connect_rules['device_config']['config_rules'].extend(connect_rules)
-        response = device_client.AddDevice(Device(**device_with_connect_rules))
-        assert response.device_uuid.uuid == device_uuid
-
-        #expected_events.extend([
-        #    # Device creation, update for automation to start the device
-        #    ('DeviceEvent', EVENT_CREATE, json_device_id(device_uuid)),
-        #    #('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)),
-        #])
-
-        #response = context_client.GetDevice(response)
-        #for endpoint in response.device_endpoints:
-        #    for _ in endpoint.kpi_sample_types:
-        #        # Monitoring configures monitoring for endpoint
-        #        expected_events.append(('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)))
-
-    # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    #check_events(events_collector, expected_events)
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
-
-
-def test_devices_bootstrapped(context_client : ContextClient):  # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure bevices are created -----------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
-
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == 0
-
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
-
-
-def test_links_creation(context_client : ContextClient):  # pylint: disable=redefined-outer-name
-
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client)
-    #events_collector.start()
-
-    #expected_events = []
-
-    # ----- Create Links and Validate Collected Events -----------------------------------------------------------------
-    for link in LINKS:
-        link_uuid = link['link_id']['link_uuid']['uuid']
-        LOGGER.info('Adding Link {:s}'.format(link_uuid))
-        response = context_client.SetLink(Link(**link))
-        assert response.link_uuid.uuid == link_uuid
-        #expected_events.append(('LinkEvent', EVENT_CREATE, json_link_id(link_uuid)))
-
-    # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    #check_events(events_collector, expected_events)
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
-
-
-def test_links_created(context_client : ContextClient):  # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure links are created -------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
-
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
+    assert len(response.links) == descriptor_loader.num_links
 
+    for context_uuid, _ in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        assert len(response.services) == 0
 
-def test_scenario_kpis_created(monitoring_client: MonitoringClient):
+def test_scenario_kpis_created(
+    context_client : ContextClient,         # pylint: disable=redefined-outer-name
+    monitoring_client: MonitoringClient,    # pylint: disable=redefined-outer-name
+) -> None:
     """
     This test validates that KPIs related to the service/device/endpoint were created
     during the service creation process.
     """
-    response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty())
-    # TODO: replace the magic number `16` below for a formula that adapts to the number
-    # of links and devices
-    assert len(response.kpi_descriptor_list) >= 16
+    response = context_client.ListDevices(Empty())
+    kpis_expected = set()
+    for device in response.devices:
+        device_uuid = device.device_id.device_uuid.uuid
+        for endpoint in device.device_endpoints:
+            endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+            for kpi_sample_type in endpoint.kpi_sample_types:
+                kpis_expected.add((device_uuid, endpoint_uuid, kpi_sample_type))
+    num_kpis_expected = len(kpis_expected)
+    LOGGER.info('Num KPIs expected: {:d}'.format(num_kpis_expected))
+
+    num_kpis_created, num_retry = 0, 0
+    while (num_kpis_created != num_kpis_expected) and (num_retry < 5):
+        response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty())
+        num_kpis_created = len(response.kpi_descriptor_list)
+        LOGGER.info('Num KPIs created: {:d}'.format(num_kpis_created))
+        if num_kpis_created != num_kpis_expected: time.sleep(0.5)
+        num_retry += 1
+    assert num_kpis_created == num_kpis_expected
diff --git a/src/tests/ofc22/tests/test_functional_cleanup.py b/src/tests/ofc22/tests/test_functional_cleanup.py
index b0dfe54900f5a806607fcd669942e7fa592dcbaa..be807eaa0242f2363b5b6c189ce4de264528a54c 100644
--- a/src/tests/ofc22/tests/test_functional_cleanup.py
+++ b/src/tests/ofc22/tests/test_functional_cleanup.py
@@ -12,93 +12,63 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, pytest
-from common.Settings import get_setting
-from common.tests.EventTools import EVENT_REMOVE, check_events
+import logging
+from common.tools.descriptor.Loader import DescriptorLoader
 from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Link import json_link_id
-from common.tools.object_factory.Topology import json_topology_id
-from context.client.ContextClient import ContextClient
-from context.client.EventsCollector import EventsCollector
 from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId
+from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
-from tests.Fixtures import context_client, device_client
-from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+from tests.Fixtures import context_client, device_client    # pylint: disable=unused-import
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
+DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
 
-def test_services_removed(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+
+def test_services_removed(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,   # pylint: disable=redefined-outer-name
+) -> None:
     # ----- List entities - Ensure service is removed ------------------------------------------------------------------
+    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
+        descriptors = f.read()
+
+    descriptor_loader = DescriptorLoader(descriptors)
+
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
-
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
-
-
-def test_scenario_cleanup(
-    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
-
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client)
-    #events_collector.start()
-
-    #expected_events = []
-
-    # ----- Delete Links and Validate Collected Events -----------------------------------------------------------------
-    for link in LINKS:
-        link_id = link['link_id']
-        link_uuid = link_id['link_uuid']['uuid']
-        LOGGER.info('Deleting Link {:s}'.format(link_uuid))
-        context_client.RemoveLink(LinkId(**link_id))
-        #expected_events.append(('LinkEvent', EVENT_REMOVE, json_link_id(link_uuid)))
-
-    # ----- Delete Devices and Validate Collected Events ---------------------------------------------------------------
-    for device, _ in DEVICES:
-        device_id = device['device_id']
-        device_uuid = device_id['device_uuid']['uuid']
-        LOGGER.info('Deleting Device {:s}'.format(device_uuid))
-        device_client.DeleteDevice(DeviceId(**device_id))
-        #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid)))
-
-    # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------
-    for topology in TOPOLOGIES:
-        topology_id = topology['topology_id']
-        context_uuid = topology_id['context_id']['context_uuid']['uuid']
-        topology_uuid = topology_id['topology_uuid']['uuid']
-        LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
-        context_client.RemoveTopology(TopologyId(**topology_id))
-        context_id = json_context_id(context_uuid)
-        #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id)))
-
-    # ----- Delete Contexts and Validate Collected Events --------------------------------------------------------------
-    for context in CONTEXTS:
-        context_id = context['context_id']
-        context_uuid = context_id['context_uuid']['uuid']
-        LOGGER.info('Deleting Context {:s}'.format(context_uuid))
-        context_client.RemoveContext(ContextId(**context_id))
-        #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid)))
-
-    # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    #check_events(events_collector, expected_events)
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
-
-
-def test_scenario_empty_again(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    assert len(response.links) == descriptor_loader.num_links
+
+    for context_uuid, _ in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        assert len(response.services) == 0
+
+
+    # ----- Delete Links, Devices, Topologies, Contexts ----------------------------------------------------------------
+    for link in descriptor_loader.links:
+        context_client.RemoveLink(LinkId(**link['link_id']))
+
+    for device in descriptor_loader.devices:
+        device_client .DeleteDevice(DeviceId(**device['device_id']))
+
+    for context_uuid, topology_list in descriptor_loader.topologies.items():
+        for topology in topology_list:
+            context_client.RemoveTopology(TopologyId(**topology['topology_id']))
+
+    for context in descriptor_loader.contexts:
+        context_client.RemoveContext(ContextId(**context['context_id']))
+
+
     # ----- List entities - Ensure database is empty again -------------------------------------------------------------
     response = context_client.ListContexts(Empty())
     assert len(response.contexts) == 0
diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py
index 5615f119b91fba10dd767d7188b303f926750e06..e606d060d52631ba72e191d7c025bd7b43048b39 100644
--- a/src/tests/ofc22/tests/test_functional_create_service.py
+++ b/src/tests/ofc22/tests/test_functional_create_service.py
@@ -12,24 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, pytest, random, time
+import logging, random
 from common.DeviceTypes import DeviceTypeEnum
-from common.Settings import get_setting
-from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
-from common.tools.object_factory.Connection import json_connection_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Service import json_service_id
+from common.proto.context_pb2 import ContextId, Empty
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.tools.descriptor.Loader import DescriptorLoader
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from compute.tests.mock_osm.MockOSM import MockOSM
+from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from monitoring.client.MonitoringClient import MonitoringClient
-from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import ContextId, Empty
-from tests.Fixtures import context_client, monitoring_client
-from .Fixtures import osm_wim
-from .Objects import (
-    CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES,
-    WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
+from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import
+from tests.tools.mock_osm.MockOSM import MockOSM
+from .Fixtures import osm_wim # pylint: disable=unused-import
+from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
@@ -37,89 +32,69 @@ LOGGER.setLevel(logging.DEBUG)
 DEVTYPE_EMU_PR  = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
 DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value
 
+DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
+
+def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
+    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
+        descriptors = f.read()
+
+    descriptor_loader = DescriptorLoader(descriptors)
 
-def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure links are created -------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
+    assert len(response.links) == descriptor_loader.num_links
 
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
+    for context_uuid, _ in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        assert len(response.services) == 0
 
 
-def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    # TODO: restablish the tests of the events
-    # events_collector = EventsCollector(context_client, log_events_received=True)
-    # events_collector.start()
-
     # ----- Create Service ---------------------------------------------------------------------------------------------
     service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
     osm_wim.get_connectivity_service_status(service_uuid)
 
-    # ----- Validate collected events ----------------------------------------------------------------------------------
-
-    # packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
-    # optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
-    # optical_service_uuid = '{:s}:optical'.format(service_uuid)
-
-    # expected_events = [
-    #    # Create packet service and add first endpoint
-    #    ('ServiceEvent',    EVENT_CREATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-    #    ('ServiceEvent',    EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-    
-    #    # Configure OLS controller, create optical service, create optical connection
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
-    #    ('ServiceEvent',    EVENT_CREATE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
-    #    ('ConnectionEvent', EVENT_CREATE, json_connection_id(optical_connection_uuid)),
-    
-    #    # Configure endpoint packet devices, add second endpoint to service, create connection
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
-    #    ('ServiceEvent',    EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-    #    ('ConnectionEvent', EVENT_CREATE, json_connection_id(packet_connection_uuid)),
-    # ]
-    # check_events(events_collector, expected_events)
-
-    # # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    # events_collector.stop()
-
-
-def test_scenario_service_created(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+
     # ----- List entities - Ensure service is created ------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
+    assert len(response.links) == descriptor_loader.num_links
+
+    for context_uuid, num_services in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+        assert len(response.services) == 2*num_services # OLS & L3NM => (L3NM + TAPI)
 
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-    assert len(response.services) == 2 # L3NM + TAPI
-    for service in response.services:
-        service_id = service.service_id
-        response = context_client.ListConnections(service_id)
-        LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
-            grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
-        assert len(response.connections) == 1 # one connection per service
+        for service in response.services:
+            service_id = service.service_id
+            response = context_client.ListConnections(service_id)
+            LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+                grpc_message_to_json_string(service_id), len(response.connections),
+                grpc_message_to_json_string(response)))
+            assert len(response.connections) == 1 # one connection per service
 
 
-def test_scenario_kpi_values_created(monitoring_client: MonitoringClient):
+def test_scenario_kpi_values_created(
+    monitoring_client: MonitoringClient,    # pylint: disable=redefined-outer-name
+) -> None:
     """
     This test validates that KPI values have been inserted into the monitoring database.
     We short k KPI descriptors to test.
@@ -128,6 +103,22 @@ def test_scenario_kpi_values_created(monitoring_client: MonitoringClient):
     kpi_descriptors = random.choices(response.kpi_descriptor_list, k=2)
 
     for kpi_descriptor in kpi_descriptors:
+        MSG = 'KPI(kpi_uuid={:s}, device_uuid={:s}, endpoint_uuid={:s}, service_uuid={:s}, kpi_sample_type={:s})...'
+        LOGGER.info(MSG.format(
+            str(kpi_descriptor.kpi_id.kpi_id.uuid), str(kpi_descriptor.device_id.device_uuid.uuid),
+            str(kpi_descriptor.endpoint_id.endpoint_uuid.uuid), str(kpi_descriptor.service_id.service_uuid.uuid),
+            str(KpiSampleType.Name(kpi_descriptor.kpi_sample_type))))
         response = monitoring_client.GetInstantKpi(kpi_descriptor.kpi_id)
-        assert response.kpi_id.kpi_id.uuid == kpi_descriptor.kpi_id.kpi_id.uuid
-        assert response.timestamp.timestamp > 0
+        kpi_uuid = response.kpi_id.kpi_id.uuid
+        assert kpi_uuid == kpi_descriptor.kpi_id.kpi_id.uuid
+        kpi_value_type = response.kpi_value.WhichOneof('value')
+        if kpi_value_type is None:
+            MSG = '  KPI({:s}): No instant value found'
+            LOGGER.warning(MSG.format(str(kpi_uuid)))
+        else:
+            kpi_timestamp = response.timestamp.timestamp
+            assert kpi_timestamp > 0
+            assert kpi_value_type == 'floatVal'
+            kpi_value = getattr(response.kpi_value, kpi_value_type)
+            MSG = '  KPI({:s}): timestamp={:s} value_type={:s} value={:s}'
+            LOGGER.info(MSG.format(str(kpi_uuid), str(kpi_timestamp), str(kpi_value_type), str(kpi_value)))
diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py
index 5d9568cd81906ac76b600a2253a5e0bdf741bc01..0f8d088012bed164e4603a813bfe9154eda8f568 100644
--- a/src/tests/ofc22/tests/test_functional_delete_service.py
+++ b/src/tests/ofc22/tests/test_functional_delete_service.py
@@ -12,23 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, pytest
+import logging
+from common.Constants import DEFAULT_CONTEXT_UUID
 from common.DeviceTypes import DeviceTypeEnum
-from common.Settings import get_setting
-from common.tests.EventTools import EVENT_REMOVE, EVENT_UPDATE, check_events
-from common.tools.object_factory.Connection import json_connection_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Service import json_service_id
+from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
+from common.tools.descriptor.Loader import DescriptorLoader
+from common.tools.object_factory.Context import json_context_id
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from compute.tests.mock_osm.MockOSM import MockOSM
 from context.client.ContextClient import ContextClient
-from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
-from tests.Fixtures import context_client
-from .Fixtures import osm_wim
-from .Objects import (
-    CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING,
-    WIM_PASSWORD, WIM_USERNAME)
+from tests.Fixtures import context_client   # pylint: disable=unused-import
+from tests.tools.mock_osm.MockOSM import MockOSM
+from .Fixtures import osm_wim # pylint: disable=unused-import
 
 
 LOGGER = logging.getLogger(__name__)
@@ -37,86 +31,69 @@ LOGGER.setLevel(logging.DEBUG)
 DEVTYPE_EMU_PR  = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
 DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value
 
+DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
 
-def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+
+def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure service is created ------------------------------------------------------------------
+    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
+        descriptors = f.read()
+
+    descriptor_loader = DescriptorLoader(descriptors)
+
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
+    assert len(response.links) == descriptor_loader.num_links
 
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-    assert len(response.services) == 2 # L3NM + TAPI
+    l3nm_service_uuids = set()
+    response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)))
+    assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI)
     for service in response.services:
         service_id = service.service_id
+
+        if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
+            service_uuid = service_id.service_uuid.uuid
+            l3nm_service_uuids.add(service_uuid)
+            osm_wim.conn_info[service_uuid] = {}
+
         response = context_client.ListConnections(service_id)
         LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
-            grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
+            grpc_message_to_json_string(service_id), len(response.connections),
+            grpc_message_to_json_string(response)))
         assert len(response.connections) == 1 # one connection per service
 
+    # Identify service to delete
+    assert len(l3nm_service_uuids) == 1  # assume a single L3NM service has been created
+    l3nm_service_uuid = set(l3nm_service_uuids).pop()
 
-def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
-    # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client, log_events_received=True)
-    #events_collector.start()
 
     # ----- Delete Service ---------------------------------------------------------------------------------------------
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-    assert len(response.services) == 2 # L3NM + TAPI
-    service_uuids = set()
-    for service in response.services:
-        if service.service_type != ServiceTypeEnum.SERVICETYPE_L3NM: continue
-        service_uuid = service.service_id.service_uuid.uuid
-        service_uuids.add(service_uuid)
-        osm_wim.conn_info[service_uuid] = {}
-
-    assert len(service_uuids) == 1  # assume a single L3NM service has been created
-    service_uuid = set(service_uuids).pop()
-
-    osm_wim.delete_connectivity_service(service_uuid)
-
-    # ----- Validate collected events ----------------------------------------------------------------------------------
-    #packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
-    #optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
-    #optical_service_uuid = '{:s}:optical'.format(service_uuid)
-
-    #expected_events = [
-    #    ('ConnectionEvent', EVENT_REMOVE, json_connection_id(packet_connection_uuid)),
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
-    #    ('ServiceEvent',    EVENT_REMOVE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-    #    ('ConnectionEvent', EVENT_REMOVE, json_connection_id(optical_connection_uuid)),
-    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
-    #    ('ServiceEvent',    EVENT_REMOVE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
-    #]
-    #check_events(events_collector, expected_events)
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
+    osm_wim.delete_connectivity_service(l3nm_service_uuid)
 
 
-def test_services_removed(context_client : ContextClient):  # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure service is removed ------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
+    assert len(response.links) == descriptor_loader.num_links
 
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
+    for context_uuid, num_services in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        assert len(response.services) == 0
diff --git a/src/tests/tools/__init__.py b/src/tests/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/tests/tools/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/tools/mock_osm/Constants.py b/src/tests/tools/mock_osm/Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..44d74169f0fd68073ca4ed5272f3dc7ef3ebf958
--- /dev/null
+++ b/src/tests/tools/mock_osm/Constants.py
@@ -0,0 +1,16 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+WIM_USERNAME = 'admin'
+WIM_PASSWORD = 'admin'
diff --git a/src/compute/tests/mock_osm/MockOSM.py b/src/tests/tools/mock_osm/MockOSM.py
similarity index 100%
rename from src/compute/tests/mock_osm/MockOSM.py
rename to src/tests/tools/mock_osm/MockOSM.py
diff --git a/src/tests/tools/mock_osm/Tools.py b/src/tests/tools/mock_osm/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..25a8b6111443424e8bfd2b35501b96a9a762325f
--- /dev/null
+++ b/src/tests/tools/mock_osm/Tools.py
@@ -0,0 +1,48 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Optional
+
+def compose_service_endpoint_id(site_id : str, endpoint_id : Dict):
+    device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
+    endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
+    return ':'.join([site_id, device_uuid, endpoint_uuid])
+
+def wim_mapping(site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, priority=None, redundant=[]):
+    ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid']
+    ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid']
+    service_endpoint_id = compose_service_endpoint_id(site_id, ce_endpoint_id)
+    if pe_device_id is None:
+        bearer = '{:s}:{:s}'.format(ce_device_uuid, ce_endpoint_uuid)
+    else:
+        pe_device_uuid = pe_device_id['device_uuid']['uuid']
+        bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid)
+    mapping = {
+        'service_endpoint_id': service_endpoint_id,
+        'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid,
+        'service_mapping_info': {
+            'site-id': site_id,
+            'bearer': {'bearer-reference': bearer},
+        }
+    }
+    if priority is not None: mapping['service_mapping_info']['priority'] = priority
+    if len(redundant) > 0: mapping['service_mapping_info']['redundant'] = redundant
+    return service_endpoint_id, mapping
+
+def connection_point(service_endpoint_id : str, encapsulation_type : str, vlan_id : int):
+    return {
+        'service_endpoint_id': service_endpoint_id,
+        'service_endpoint_encapsulation_type': encapsulation_type,
+        'service_endpoint_encapsulation_info': {'vlan': vlan_id}
+    }
diff --git a/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py b/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py
similarity index 100%
rename from src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py
rename to src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py
diff --git a/src/tests/tools/mock_osm/__init__.py b/src/tests/tools/mock_osm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/tests/tools/mock_osm/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/compute/tests/mock_osm/acknowledgements.txt b/src/tests/tools/mock_osm/acknowledgements.txt
similarity index 100%
rename from src/compute/tests/mock_osm/acknowledgements.txt
rename to src/tests/tools/mock_osm/acknowledgements.txt
diff --git a/src/compute/tests/mock_osm/sdnconn.py b/src/tests/tools/mock_osm/sdnconn.py
similarity index 100%
rename from src/compute/tests/mock_osm/sdnconn.py
rename to src/tests/tools/mock_osm/sdnconn.py
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index 979d0664bc42221e3559eef498bd53562fe073e7..0e008734730867bca741d748c49e3b0589b40e48 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -14,8 +14,8 @@
 
 import json, logging, re
 from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
-from common.proto.context_pb2 import (
-    Connection, Context, Device, Empty, Link, Service, Slice, Topology, ContextIdList, TopologyId, TopologyIdList)
+from common.proto.context_pb2 import Empty, ContextIdList, TopologyId, TopologyIdList
+from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology_id
@@ -23,9 +23,6 @@ from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
-from webui.service.main.DescriptorTools import (
-    format_custom_config_rules, get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices,
-    get_descriptors_add_topologies, split_devices_by_rules)
 from webui.service.main.forms import ContextTopologyForm, DescriptorForm
 
 main = Blueprint('main', __name__)
@@ -37,38 +34,6 @@ slice_client = SliceClient()
 
 logger = logging.getLogger(__name__)
 
-ENTITY_TO_TEXT = {
-    # name   => singular,    plural
-    'context'   : ('Context',    'Contexts'   ),
-    'topology'  : ('Topology',   'Topologies' ),
-    'device'    : ('Device',     'Devices'    ),
-    'link'      : ('Link',       'Links'      ),
-    'service'   : ('Service',    'Services'   ),
-    'slice'     : ('Slice',      'Slices'     ),
-    'connection': ('Connection', 'Connections'),
-}
-
-ACTION_TO_TEXT = {
-    # action =>  infinitive,  past
-    'add'     : ('Add',       'Added'),
-    'update'  : ('Update',    'Updated'),
-    'config'  : ('Configure', 'Configured'),
-}
-
-def process_descriptor(entity_name, action_name, grpc_method, grpc_class, entities):
-    entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name]
-    action_infinitive, action_past = ACTION_TO_TEXT[action_name]
-    num_ok, num_err = 0, 0
-    for entity in entities:
-        try:
-            grpc_method(grpc_class(**entity))
-            num_ok += 1
-        except Exception as e: # pylint: disable=broad-except
-            flash(f'Unable to {action_infinitive} {entity_name_singluar} {str(entity)}: {str(e)}', 'error')
-            num_err += 1
-    if num_ok : flash(f'{str(num_ok)} {entity_name_plural} {action_past}', 'success')
-    if num_err: flash(f'{str(num_err)} {entity_name_plural} failed', 'danger')
-
 def process_descriptors(descriptors):
     try:
         descriptors_file = request.files[descriptors.name]
@@ -78,80 +43,10 @@ def process_descriptors(descriptors):
         flash(f'Unable to load descriptor file: {str(e)}', 'danger')
         return
 
-    dummy_mode  = descriptors.get('dummy_mode' , False)
-    contexts    = descriptors.get('contexts'   , [])
-    topologies  = descriptors.get('topologies' , [])
-    devices     = descriptors.get('devices'    , [])
-    links       = descriptors.get('links'      , [])
-    services    = descriptors.get('services'   , [])
-    slices      = descriptors.get('slices'     , [])
-    connections = descriptors.get('connections', [])
-
-    # Format CustomConfigRules in Devices, Services and Slices provided in JSON format
-    for device in devices:
-        config_rules = device.get('device_config', {}).get('config_rules', [])
-        config_rules = format_custom_config_rules(config_rules)
-        device['device_config']['config_rules'] = config_rules
-
-    for service in services:
-        config_rules = service.get('service_config', {}).get('config_rules', [])
-        config_rules = format_custom_config_rules(config_rules)
-        service['service_config']['config_rules'] = config_rules
-
-    for slice in slices:
-        config_rules = slice.get('slice_config', {}).get('config_rules', [])
-        config_rules = format_custom_config_rules(config_rules)
-        slice['slice_config']['config_rules'] = config_rules
-
-
-    # Context and Topology require to create the entity first, and add devices, links, services, slices, etc. in a
-    # second stage.
-    contexts_add = get_descriptors_add_contexts(contexts)
-    topologies_add = get_descriptors_add_topologies(topologies)
-
-    if dummy_mode:
-        # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
-        context_client.connect()
-        process_descriptor('context',    'add',    context_client.SetContext,    Context,    contexts_add  )
-        process_descriptor('topology',   'add',    context_client.SetTopology,   Topology,   topologies_add)
-        process_descriptor('device',     'add',    context_client.SetDevice,     Device,     devices       )
-        process_descriptor('link',       'add',    context_client.SetLink,       Link,       links         )
-        process_descriptor('service',    'add',    context_client.SetService,    Service,    services      )
-        process_descriptor('slice',      'add',    context_client.SetSlice,      Slice,      slices        )
-        process_descriptor('connection', 'add',    context_client.SetConnection, Connection, connections   )
-        process_descriptor('context',    'update', context_client.SetContext,    Context,    contexts      )
-        process_descriptor('topology',   'update', context_client.SetTopology,   Topology,   topologies    )
-        context_client.close()
-    else:
-        # Normal mode: follows the automated workflows in the different components
-        assert len(connections) == 0, 'in normal mode, connections should not be set'
-
-        # Device, Service and Slice require to first create the entity and the configure it
-        devices_add, devices_config = split_devices_by_rules(devices)
-        services_add = get_descriptors_add_services(services)
-        slices_add = get_descriptors_add_slices(slices)
-
-        context_client.connect()
-        device_client.connect()
-        service_client.connect()
-        slice_client.connect()
-
-        process_descriptor('context',    'add',    context_client.SetContext,      Context,    contexts_add  )
-        process_descriptor('topology',   'add',    context_client.SetTopology,     Topology,   topologies_add)
-        process_descriptor('device',     'add',    device_client .AddDevice,       Device,     devices_add   )
-        process_descriptor('device',     'config', device_client .ConfigureDevice, Device,     devices_config)
-        process_descriptor('link',       'add',    context_client.SetLink,         Link,       links         )
-        process_descriptor('service',    'add',    service_client.CreateService,   Service,    services_add  )
-        process_descriptor('service',    'update', service_client.UpdateService,   Service,    services      )
-        process_descriptor('slice',      'add',    slice_client  .CreateSlice,     Slice,      slices_add    )
-        process_descriptor('slice',      'update', slice_client  .UpdateSlice,     Slice,      slices        )
-        process_descriptor('context',    'update', context_client.SetContext,      Context,    contexts      )
-        process_descriptor('topology',   'update', context_client.SetTopology,     Topology,   topologies    )
-
-        slice_client.close()
-        service_client.close()
-        device_client.close()
-        context_client.close()
+    descriptor_loader = DescriptorLoader(descriptors)
+    results = descriptor_loader.process()
+    for message,level in compose_notifications(results):
+        flash(message, level)
 
 @main.route('/', methods=['GET', 'POST'])
 def home():
@@ -191,7 +86,7 @@ def home():
         if descriptor_form.validate_on_submit():
             process_descriptors(descriptor_form.descriptors)
             return redirect(url_for("main.home"))
-    except Exception as e:
+    except Exception as e: # pylint: disable=broad-except
         logger.exception('Descriptor load failed')
         flash(f'Descriptor load failed: `{str(e)}`', 'danger')
     finally:
diff --git a/tutorial/2-2-ofc22.md b/tutorial/2-2-ofc22.md
index 3b55a0961da78fdc78a8feb31499608589b9d0be..04d585d24cc046e6a1aadc1c93118a1b36855aca 100644
--- a/tutorial/2-2-ofc22.md
+++ b/tutorial/2-2-ofc22.md
@@ -37,9 +37,6 @@ environment and a TeraFlowSDN controller instance as described in the
 [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the Python
 environment as described in
 [Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md).
-Remember to source the scenario settings, e.g., `cd ~/tfs-ctrl && source ofc22/deploy_specs.sh` in each terminal you open.
-Then, re-build the protocol buffers code from the proto files:
-`./proto/generate_code_python.sh`
 
 
 ## 2.2.4. Access to the WebUI and Dashboard
@@ -55,25 +52,33 @@ Notes:
 
 ## 2.2.5. Test execution
 
-Before executing the tests, the environment variables need to be prepared. 
-First, make sure to load your deployment variables by:
+Before executing the tests, we need to prepare a few things.
+
+First, you need to make sure that you have all the gRPC-generated code in your folder.
+To do so, run:
 
 ```
-source my_deploy.sh
+proto/generate_code_python.sh
 ```
 
-Then, you also need to load the environment variables to support the execution of the 
-tests by:
+Then, it is time to deploy TeraFlowSDN with the correct specification for this scenario.
+Make sure to load your deployment variables for this scenario by:
 
 ```
-source tfs_runtime_env_vars.sh
+source ofc22/deploy_specs.sh
 ```
 
-You also need to make sure that you have all the gRPC-generate code in your folder.
-To do so, run:
+Then, you need to deploy the components by running:
 
 ```
-proto/generate_code_python.sh
+./deploy.sh
+```
+
+After the deployment is finished, you need to load the environment variables to support 
+the execution of the tests by:
+
+```
+source tfs_runtime_env_vars.sh
 ```
 
 To execute this functional test, four main steps needs to be carried out:
@@ -90,8 +95,24 @@ See the troubleshooting section if needed.
 You can check the logs of the different components using the appropriate `scripts/show_logs_[component].sh` scripts
 after you execute each step.
 
+There are two ways to execute the functional tests: *running all the tests with a single script* or *running each test independently*.
+In the following we start with the first option, then we comment on how to run each test independently.
+
+
+### 2.2.5.1. Running all tests with a single script
+
+We have a script that executes all the steps at once.
+It is meant to be used to test whether all components involved in this scenario are working correctly.
+To run all the functional tests, you can run:
+
+```
+ofc22/run_tests_and_coverage.sh
+```
+
+The following sections explain each one of the steps.
 
-### 2.2.5.1. Device bootstrapping
+
+### 2.2.5.2. Device bootstrapping
 
 This step configures some basic entities (Context and Topology), the devices, and the 
 links in the topology.
@@ -103,7 +124,11 @@ The expected results are:
 
 To run this step, you can do it from the WebUI by uploading the file `./ofc22/tests/descriptors_emulated.json` that
 contains the descriptors of the contexts, topologies, devices, and links, or by 
-executing the `./ofc22/run_test_01_bootstrap.sh` script.
+executing the script:
+
+```
+./ofc22/run_test_01_bootstrap.sh
+```
 
 When the bootstrapping finishes, check in the Grafana L3-Monitoring Dashboard and you 
 should see the monitoring data being plotted and updated every 5 seconds (by default). 
@@ -117,12 +142,16 @@ Note here that the emulated devices produce synthetic randomly-generated monitor
 and do not represent any particularservices configured.
 
 
-### 2.2.5.2. L3VPN Service creation
+### 2.2.5.3. L3VPN Service creation
 
 This step configures a new service emulating the request an OSM WIM would make by means 
 of a Mock OSM instance.
 
-To run this step, execute the `./ofc22/run_test_02_create_service.sh` script.
+To run this step, execute the script:
+
+```
+./ofc22/run_test_02_create_service.sh
+```
 
 When the script finishes, check the WebUI *Services* tab. You should see that two 
 services have been created, one for the optical layer and another for the packet layer. 
@@ -133,13 +162,18 @@ the plots with the monitored data for the device.
 By default, device R1-EMU is selected.
 
 
-### 2.2.5.3. L3VPN Service removal
+### 2.2.5.4. L3VPN Service removal
 
 This step deconfigures the previously created services emulating the request an OSM WIM 
 would make by means of a Mock OSM instance.
 
-To run this step, execute the `./ofc22/run_test_03_delete_service.sh` script, or delete 
-the L3NM service from the WebUI.
+To run this step, execute the script:
+
+```
+./ofc22/run_test_03_delete_service.sh
+```
+
+or delete the L3NM service from the WebUI.
 
 When the script finishes, check the WebUI *Services* tab.
 You should see that the two services have been removed.
@@ -149,12 +183,16 @@ In the Grafana Dashboard, given that there is no service configured, you should
 0-valued flat plot again.
 
 
-### 2.2.5.4. Cleanup
+### 2.2.5.5. Cleanup
 
 This last step performs a cleanup of the scenario removing all the TeraFlowSDN entities 
 for completeness.
 
-To run this step, execute the `./ofc22/run_test_04_cleanup.sh` script.
+To run this step, execute the script:
+
+```
+./ofc22/run_test_04_cleanup.sh
+```
 
 When the script finishes, check the WebUI *Devices* tab, you should see that the devices 
 have been removed.