diff --git a/report_coverage_slice.sh b/report_coverage_slice.sh
deleted file mode 100755
index f783ec069329a9efe100154a2702a72a93e0ad8a..0000000000000000000000000000000000000000
--- a/report_coverage_slice.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-./report_coverage_all.sh | grep --color -E -i "^slice/.*$|$"
diff --git a/scripts/report_coverage_slice.sh b/scripts/report_coverage_slice.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f9b17e8bd162500e2be77a871f7f2976f923ca68
--- /dev/null
+++ b/scripts/report_coverage_slice.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+./report_coverage_all.sh | grep --color -E -i "^slice/.*$|$"
diff --git a/scripts/run_tests_locally-device-all.sh b/scripts/run_tests_locally-device-all.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2cf8faaf50355a3cc5f3a0206498ed4dacb48523
--- /dev/null
+++ b/scripts/run_tests_locally-device-all.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unitary tests and analyze code coverage at the same time
+
+# Useful flags for pytest:
+#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    device/tests/test_unitary_emulated.py
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    device/tests/test_unitary_openconfig.py
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    device/tests/test_unitary_tapi.py
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    device/tests/test_unitary_p4.py
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    device/tests/test_unitary_microwave.py
diff --git a/scripts/run_tests_in_kubernetes.sh b/scripts/run_tests_locally-device-emulated.sh
similarity index 61%
rename from scripts/run_tests_in_kubernetes.sh
rename to scripts/run_tests_locally-device-emulated.sh
index fc0e1425731bf333a88eca6b65a3fc12122c7768..ab4f77adaf9c0549551c91d944c1c6db77a8b9cb 100755
--- a/scripts/run_tests_in_kubernetes.sh
+++ b/scripts/run_tests_locally-device-emulated.sh
@@ -14,11 +14,15 @@
 # limitations under the License.
 
 
-IMAGE_NAME='integration_tester'
-IMAGE_TAG='latest'
-CI_REGISTRY_IMAGE='registry.gitlab.com/teraflow-h2020/controller'
+PROJECTDIR=`pwd`
 
-kubectl delete pod $(echo $IMAGE_NAME | sed -r 's/[^a-zA-Z0-9\.\-]/-/g') --wait=true --ignore-not-found=true
-kubectl get all
-kubectl run $(echo $IMAGE_NAME | sed -r 's/[^a-zA-Z0-9\.\-]/-/g') --image "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" --restart=Never --rm -i
-kubectl get all
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unitary tests and analyze code coverage at the same time
+
+# Useful flags for pytest:
+#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    device/tests/test_unitary_emulated.py
diff --git a/scripts/run_test_microwave_device.sh b/scripts/run_tests_locally-device-microwave.sh
similarity index 91%
rename from scripts/run_test_microwave_device.sh
rename to scripts/run_tests_locally-device-microwave.sh
index 34317b56484108d8ef83ef1c1eb74fbc31bfc25c..e03630c9f63c65cae91464b76cc3ddc447835f42 100755
--- a/scripts/run_test_microwave_device.sh
+++ b/scripts/run_tests_locally-device-microwave.sh
@@ -24,5 +24,5 @@ RCFILE=$PROJECTDIR/coverage/.coveragerc
 # Useful flags for pytest:
 #-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
 
-coverage run --rcfile=$RCFILE --append -m pytest -s --log-level=INFO --verbose \
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     device/tests/test_unitary_microwave.py
diff --git a/scripts/run_tests_locally-device-openconfig.sh b/scripts/run_tests_locally-device-openconfig.sh
new file mode 100755
index 0000000000000000000000000000000000000000..83d4a0545a3386395ead97f40d45c034350c73b9
--- /dev/null
+++ b/scripts/run_tests_locally-device-openconfig.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unitary tests and analyze code coverage at the same time
+
+# Useful flags for pytest:
+#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    device/tests/test_unitary_openconfig.py
diff --git a/scripts/run_tests_locally-device.sh b/scripts/run_tests_locally-device-p4.sh
similarity index 96%
rename from scripts/run_tests_locally-device.sh
rename to scripts/run_tests_locally-device-p4.sh
index ba6c0b6a58031720addc17cc0de9169e592099f5..36b381a3cd9214603456828b41e6d70b8c6c908d 100755
--- a/scripts/run_tests_locally-device.sh
+++ b/scripts/run_tests_locally-device-p4.sh
@@ -25,4 +25,4 @@ RCFILE=$PROJECTDIR/coverage/.coveragerc
 #-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
 
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
-    device/tests/test_unitary.py
+    device/tests/test_unitary_p4.py
diff --git a/scripts/run_tests_locally-device-tapi.sh b/scripts/run_tests_locally-device-tapi.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a281466b677f256b2ce9fe7770bf2b052ef59126
--- /dev/null
+++ b/scripts/run_tests_locally-device-tapi.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unitary tests and analyze code coverage at the same time
+
+# Useful flags for pytest:
+#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    device/tests/test_unitary_tapi.py
diff --git a/scripts/run_tests_locally.sh b/scripts/run_tests_locally.sh
index 633510a54c33d0603be7cc35706114c29561b958..4a95fd8be7ac83ff0b5ec5a3db47c0cf4fae06f4 100755
--- a/scripts/run_tests_locally.sh
+++ b/scripts/run_tests_locally.sh
@@ -63,7 +63,7 @@ coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     context/tests/test_unitary.py
 
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
-    device/tests/test_unitary.py
+    device/tests/test_unitary_emulated.py
 
 coverage run --rcfile=$RCFILE --append -m pytest -s --log-level=INFO --verbose \
     l3_centralizedattackdetector/tests/test_unitary.py
diff --git a/src/common/Constants.py b/src/common/Constants.py
index 9d5edc235f582f7df4a274b15bf646413d14b1bd..dbe4124e792db73e122a7b436ca19be814aed761 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -12,5 +12,64 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+from enum import Enum
+
+# Default logging level
+DEFAULT_LOG_LEVEL = logging.WARNING
+
+# Default gRPC server settings
+DEFAULT_GRPC_BIND_ADDRESS = '0.0.0.0'
+DEFAULT_GRPC_MAX_WORKERS  = 10
+DEFAULT_GRPC_GRACE_PERIOD = 60
+
+# Default HTTP server settings
+DEFAULT_HTTP_BIND_ADDRESS = '0.0.0.0'
+
+# Default Prometheus settings
+DEFAULT_METRICS_PORT = 9192
+
+# Default context and topology UUIDs
 DEFAULT_CONTEXT_UUID = 'admin'
 DEFAULT_TOPOLOGY_UUID = 'admin'
+
+# Default service names
+class ServiceNameEnum(Enum):
+    CONTEXT       = 'context'
+    DEVICE        = 'device'
+    SERVICE       = 'service'
+    SLICE         = 'slice'
+    AUTOMATION    = 'automation'
+    POLICY        = 'policy'
+    MONITORING    = 'monitoring'
+    DLT           = 'dlt'
+    COMPUTE       = 'compute'
+    CYBERSECURITY = 'cybersecurity'
+    INTERDOMAIN   = 'interdomain'
+
+# Default gRPC service ports
+DEFAULT_SERVICE_GRPC_PORTS = {
+    ServiceNameEnum.CONTEXT      .value :  1010,
+    ServiceNameEnum.DEVICE       .value :  2020,
+    ServiceNameEnum.SERVICE      .value :  3030,
+    ServiceNameEnum.SLICE        .value :  4040,
+    ServiceNameEnum.AUTOMATION   .value :  5050,
+    ServiceNameEnum.POLICY       .value :  6060,
+    ServiceNameEnum.MONITORING   .value :  7070,
+    ServiceNameEnum.DLT          .value :  8080,
+    ServiceNameEnum.COMPUTE      .value :  9090,
+    ServiceNameEnum.CYBERSECURITY.value : 10000,
+    ServiceNameEnum.INTERDOMAIN  .value : 10010,
+}
+
+# Default HTTP/REST-API service ports
+DEFAULT_SERVICE_HTTP_PORTS = {
+    ServiceNameEnum.CONTEXT   .value : 8080,
+    ServiceNameEnum.COMPUTE   .value : 8080,
+}
+
+# Default HTTP/REST-API service base URLs
+DEFAULT_SERVICE_HTTP_BASEURLS = {
+    ServiceNameEnum.CONTEXT   .value : '/api',
+    ServiceNameEnum.COMPUTE   .value : '/restconf/data',
+}
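With `ServiceNameEnum` members keyed by their `.value` strings, the default port and base-URL maps above resolve with a plain dict lookup. A minimal sketch, assuming `src/` is on the PYTHONPATH:

```python
# Sketch: resolving the defaults defined in common/Constants.py above.
from common.Constants import (
    DEFAULT_SERVICE_GRPC_PORTS, DEFAULT_SERVICE_HTTP_BASEURLS, ServiceNameEnum)

assert DEFAULT_SERVICE_GRPC_PORTS[ServiceNameEnum.DEVICE.value] == 2020
assert DEFAULT_SERVICE_HTTP_BASEURLS[ServiceNameEnum.COMPUTE.value] == '/restconf/data'
```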
diff --git a/src/common/Settings.py b/src/common/Settings.py
index f6bc214bf7500227c621b197e2626e09f2dafb93..e9d5f406d2acad0d31ae94b604183c129d28f146 100644
--- a/src/common/Settings.py
+++ b/src/common/Settings.py
@@ -14,15 +14,33 @@
 
 import logging, os, time
 from typing import List
+from common.Constants import (
+    DEFAULT_GRPC_BIND_ADDRESS, DEFAULT_GRPC_GRACE_PERIOD, DEFAULT_GRPC_MAX_WORKERS, DEFAULT_HTTP_BIND_ADDRESS,
+    DEFAULT_LOG_LEVEL, DEFAULT_METRICS_PORT, DEFAULT_SERVICE_GRPC_PORTS, DEFAULT_SERVICE_HTTP_BASEURLS,
+    DEFAULT_SERVICE_HTTP_PORTS, ServiceNameEnum
+)
 
 LOGGER = logging.getLogger(__name__)
 
 DEFAULT_RESTART_DELAY = 5.0 # seconds
 
+ENVVAR_KUBERNETES_PORT            = 'KUBERNETES_PORT'
+ENVVAR_GRPC_BIND_ADDRESS          = 'GRPC_BIND_ADDRESS'
+ENVVAR_GRPC_MAX_WORKERS           = 'GRPC_MAX_WORKERS'
+ENVVAR_GRPC_GRACE_PERIOD          = 'GRPC_GRACE_PERIOD'
+ENVVAR_HTTP_BIND_ADDRESS          = 'HTTP_BIND_ADDRESS'
+ENVVAR_LOG_LEVEL                  = 'LOG_LEVEL'
+ENVVAR_METRICS_PORT               = 'METRICS_PORT'
+
+ENVVAR_SUFIX_SERVICE_BASEURL_HTTP = 'SERVICE_BASEURL_HTTP'
+ENVVAR_SUFIX_SERVICE_HOST         = 'SERVICE_HOST'
+ENVVAR_SUFIX_SERVICE_PORT_GRPC    = 'SERVICE_PORT_GRPC'
+ENVVAR_SUFIX_SERVICE_PORT_HTTP    = 'SERVICE_PORT_HTTP'
+
 def wait_for_environment_variables(
     required_environment_variables : List[str] = [], wait_delay_seconds : float = DEFAULT_RESTART_DELAY
 ):
-    if 'KUBERNETES_PORT' not in os.environ: return # We're not running in Kubernetes, nothing to wait for
+    if ENVVAR_KUBERNETES_PORT not in os.environ: return # We're not running in Kubernetes, nothing to wait for
     missing_variables = set(required_environment_variables).difference(set(os.environ.keys()))
     if len(missing_variables) == 0: return # We have all environment variables defined
     msg = 'Variables({:s}) are missing in Environment({:s}), restarting in {:f} seconds...'
@@ -36,4 +54,45 @@ def get_setting(name, **kwargs):
         value = kwargs['settings'].pop(name, value)
     if value is not None: return value
     if 'default' in kwargs: return kwargs['default']
-    raise Exception('Setting({}) not specified in environment or configuration'.format(name))
+    raise Exception('Setting({:s}) not specified in environment or configuration'.format(str(name)))
+
+def get_env_var_name(service_name : ServiceNameEnum, env_var_group):
+    return ('{:s}SERVICE_{:s}'.format(service_name.value, env_var_group)).upper()
+
+def get_service_host(service_name : ServiceNameEnum):
+    envvar_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_HOST)
+    default_value = ('{:s}service'.format(service_name.value))
+    return get_setting(envvar_name, default=default_value)
+
+def get_service_port_grpc(service_name : ServiceNameEnum):
+    envvar_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_PORT_GRPC)
+    default_value = DEFAULT_SERVICE_GRPC_PORTS.get(service_name.value)
+    return get_setting(envvar_name, default=default_value)
+
+def get_service_port_http(service_name : ServiceNameEnum):
+    envvar_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_PORT_HTTP)
+    default_value = DEFAULT_SERVICE_HTTP_PORTS.get(service_name.value)
+    return get_setting(envvar_name, default=default_value)
+
+def get_service_baseurl_http(service_name : ServiceNameEnum):
+    envvar_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_BASEURL_HTTP)
+    default_value = DEFAULT_SERVICE_HTTP_BASEURLS.get(service_name.value)
+    return get_setting(envvar_name, default=default_value)
+
+def get_log_level():
+    return get_setting(ENVVAR_LOG_LEVEL, default=DEFAULT_LOG_LEVEL)
+
+def get_metrics_port():
+    return get_setting(ENVVAR_METRICS_PORT, default=DEFAULT_METRICS_PORT)
+
+def get_grpc_bind_address():
+    return get_setting(ENVVAR_GRPC_BIND_ADDRESS, default=DEFAULT_GRPC_BIND_ADDRESS)
+
+def get_grpc_max_workers():
+    return get_setting(ENVVAR_GRPC_MAX_WORKERS, default=DEFAULT_GRPC_MAX_WORKERS)
+
+def get_grpc_grace_period():
+    return get_setting(ENVVAR_GRPC_GRACE_PERIOD, default=DEFAULT_GRPC_GRACE_PERIOD)
+
+def get_http_bind_address():
+    return get_setting(ENVVAR_HTTP_BIND_ADDRESS, default=DEFAULT_HTTP_BIND_ADDRESS)
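The helpers above compose per-service environment variable names as `<NAME>SERVICE_<SUFFIX>` and fall back to the defaults from `common.Constants`. A sketch of the resolution order, assuming no overriding variables are set in the environment:

```python
# Sketch: name composition and fallback behavior of common/Settings.py above.
from common.Constants import ServiceNameEnum
from common.Settings import (
    ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_host, get_service_port_grpc)

assert get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC) == \
    'CONTEXTSERVICE_SERVICE_PORT_GRPC'

# Without environment overrides, hosts default to '<name>service' and ports to
# DEFAULT_SERVICE_GRPC_PORTS:
assert get_service_host(ServiceNameEnum.CONTEXT) == 'contextservice'
assert get_service_port_grpc(ServiceNameEnum.CONTEXT) == 1010
```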
diff --git a/src/common/tests/MockService.py b/src/common/tests/MockService.py
deleted file mode 100644
index 25f36e009be3a65c1ba9c18c7707742a75253a75..0000000000000000000000000000000000000000
--- a/src/common/tests/MockService.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import grpc, logging
-from concurrent import futures
-
-GRPC_MAX_WORKERS  = 10
-GRPC_GRACE_PERIOD = 60
-
-class MockService:
-    def __init__(self, address, port, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD, cls_name=__name__):
-        self.logger = logging.getLogger(cls_name)
-        self.address = address
-        self.port = port
-        self.endpoint = None
-        self.max_workers = max_workers
-        self.grace_period = grace_period
-        self.pool = None
-        self.server = None
-
-    def install_servicers(self):
-        pass
-
-    def start(self):
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(self.port))
-        self.logger.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
-            str(self.endpoint), str(self.max_workers)))
-
-        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
-        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))
-
-        self.install_servicers()
-
-        port = self.server.add_insecure_port(self.endpoint)
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(port))
-        self.logger.info('Listening on {:s}...'.format(str(self.endpoint)))
-        self.server.start()
-
-        self.logger.debug('Service started')
-
-    def stop(self):
-        self.logger.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period)))
-        self.server.stop(self.grace_period)
-        self.logger.debug('Service stopped')
diff --git a/src/device/tests/MockMonitoringServiceServicerImpl.py b/src/common/tests/MockServicerImpl_Monitoring.py
similarity index 70%
rename from src/device/tests/MockMonitoringServiceServicerImpl.py
rename to src/common/tests/MockServicerImpl_Monitoring.py
index 05ca43dda0418df151bc3dfe255a90d75b50a088..9f646c3667485ae3240c59284a73fef01bffb7bb 100644
--- a/src/device/tests/MockMonitoringServiceServicerImpl.py
+++ b/src/common/tests/MockServicerImpl_Monitoring.py
@@ -12,18 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
+
+import grpc, logging
 from queue import Queue
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from monitoring.proto.context_pb2 import Empty
 from monitoring.proto.monitoring_pb2 import Kpi
 from monitoring.proto.monitoring_pb2_grpc import MonitoringServiceServicer
 
 LOGGER = logging.getLogger(__name__)
 
-class MockMonitoringServiceServicerImpl(MonitoringServiceServicer):
+class MockServicerImpl_Monitoring(MonitoringServiceServicer):
     def __init__(self, queue_samples : Queue):
+        LOGGER.info('[__init__] Creating Servicer...')
         self.queue_samples = queue_samples
+        LOGGER.info('[__init__] Servicer Created')
 
-    def IncludeKpi(self, request : Kpi, context) -> Empty:
+    def IncludeKpi(self, request : Kpi, context : grpc.ServicerContext) -> Empty:
+        LOGGER.info('[IncludeKpi] request={:s}'.format(grpc_message_to_json_string(request)))
         self.queue_samples.put(request)
         return Empty()
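The renamed mock enqueues every `Kpi` it receives, so tests can block on the queue until the component under test emits a sample. A hypothetical usage sketch (gRPC server wiring elided):

```python
# Hypothetical sketch: collect Kpis pushed through MockServicerImpl_Monitoring.
from queue import Queue
from common.tests.MockServicerImpl_Monitoring import MockServicerImpl_Monitoring
from monitoring.proto.monitoring_pb2_grpc import add_MonitoringServiceServicer_to_server

queue_samples = Queue()
servicer = MockServicerImpl_Monitoring(queue_samples)
# add_MonitoringServiceServicer_to_server(servicer, server)  # server: grpc.server(...)
# ... exercise the component under test, then:
# kpi = queue_samples.get(block=True, timeout=10)  # first Kpi received by the mock
```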
diff --git a/src/common/tools/object_factory/Device.py b/src/common/tools/object_factory/Device.py
index d5990db5e9be8df8a9948169bb5444b1778cffdc..e144b5b4c7af6ffc5a8ed2d6729a53f74d7f9949 100644
--- a/src/common/tools/object_factory/Device.py
+++ b/src/common/tools/object_factory/Device.py
@@ -20,8 +20,8 @@ from context.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusE
 
 DEVICE_DISABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
 
+DEVICE_EMUOLS_TYPE  = DeviceTypeEnum.EMULATED_OPTICAL_LINE_SYSTEM.value
 DEVICE_EMUPR_TYPE   = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
-DEVICE_EMUOLS_TYPE   = DeviceTypeEnum.EMULATED_OPTICAL_LINE_SYSTEM.value
 DEVICE_EMU_DRIVERS  = [DeviceDriverEnum.DEVICEDRIVER_UNDEFINED]
 DEVICE_EMU_ADDRESS  = '127.0.0.1'
 DEVICE_EMU_PORT     = '0'
diff --git a/src/common/tools/object_factory/EndPoint.py b/src/common/tools/object_factory/EndPoint.py
index d750604365fc616536374dd3541a17613da4746f..9eca5e96371713d1e095eba4666ee806ad6cf71e 100644
--- a/src/common/tools/object_factory/EndPoint.py
+++ b/src/common/tools/object_factory/EndPoint.py
@@ -13,13 +13,21 @@
 # limitations under the License.
 
 import copy
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Tuple
 
 def json_endpoint_id(device_id : Dict, endpoint_uuid : str, topology_id : Optional[Dict] = None):
     result = {'device_id': copy.deepcopy(device_id), 'endpoint_uuid': {'uuid': endpoint_uuid}}
     if topology_id is not None: result['topology_id'] = copy.deepcopy(topology_id)
     return result
 
+def json_endpoint_ids(
+        device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]], topology_id : Optional[Dict] = None
+    ):
+    return [
+        json_endpoint_id(device_id, endpoint_uuid, topology_id=topology_id)
+        for endpoint_uuid, _, _ in endpoint_descriptors
+    ]
+
 def json_endpoint(
         device_id : Dict, endpoint_uuid : str, endpoint_type : str, topology_id : Optional[Dict] = None,
         kpi_sample_types : List[int] = []
@@ -31,3 +39,13 @@ def json_endpoint(
     }
     if len(kpi_sample_types) > 0: result['kpi_sample_types'] = copy.deepcopy(kpi_sample_types)
     return result
+
+def json_endpoints(
+        device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]], topology_id : Optional[Dict] = None
+    ):
+    return [
+        json_endpoint(
+            device_id, endpoint_uuid, endpoint_type, topology_id=topology_id,
+            kpi_sample_types=endpoint_sample_types)
+        for endpoint_uuid, endpoint_type, endpoint_sample_types in endpoint_descriptors
+    ]
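Both new helpers consume the same descriptor shape, a list of `(endpoint_uuid, endpoint_type, kpi_sample_types)` tuples, so callers can define endpoints once and derive both the id and full-object forms. A sketch with hypothetical values:

```python
# Sketch: one descriptor list feeds both json_endpoint_ids and json_endpoints.
from common.tools.object_factory.EndPoint import json_endpoint_ids, json_endpoints

device_id = {'device_uuid': {'uuid': 'R1'}}        # hypothetical device id
endpoint_descriptors = [
    ('1/1', 'optical', []),                        # no KPI sample types
    ('1/2', 'copper',  [101, 102]),                # hypothetical KPI sample type codes
]

endpoint_ids = json_endpoint_ids(device_id, endpoint_descriptors)
endpoints    = json_endpoints(device_id, endpoint_descriptors)
assert endpoint_ids[0]['endpoint_uuid']['uuid'] == '1/1'
assert endpoints[1]['kpi_sample_types'] == [101, 102]
```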
diff --git a/src/common/tools/object_factory/Link.py b/src/common/tools/object_factory/Link.py
index fac27945912be6ee24b3808f567ea160e017fb37..624cbb8dcc868ce3575062e31ae3b0609c300637 100644
--- a/src/common/tools/object_factory/Link.py
+++ b/src/common/tools/object_factory/Link.py
@@ -15,6 +15,11 @@
 import copy
 from typing import Dict, List
 
+def get_link_uuid(a_device_id : Dict, a_endpoint_id : Dict, z_device_id : Dict, z_endpoint_id : Dict) -> str:
+    return '{:s}/{:s}=={:s}/{:s}'.format(
+        a_device_id['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'],
+        z_device_id['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid'])
+
 def json_link_id(link_uuid : str):
     return {'link_uuid': {'uuid': link_uuid}}
 
diff --git a/src/common/tools/service/GenericGrpcService.py b/src/common/tools/service/GenericGrpcService.py
new file mode 100644
index 0000000000000000000000000000000000000000..61fccdb02b5a6cbd23600093bcba4c69bf142d83
--- /dev/null
+++ b/src/common/tools/service/GenericGrpcService.py
@@ -0,0 +1,71 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional, Union
+import grpc, logging
+from concurrent import futures
+from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
+from grpc_health.v1.health_pb2 import HealthCheckResponse
+from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
+from common.Settings import get_grpc_bind_address, get_grpc_grace_period, get_grpc_max_workers
+
+class GenericGrpcService:
+    def __init__(
+        self, bind_port : Union[str, int], bind_address : Optional[str] = None, max_workers : Optional[int] = None,
+        grace_period : Optional[int] = None, enable_health_servicer : bool = True, cls_name : str = __name__
+    ) -> None:
+        self.logger = logging.getLogger(cls_name)
+        self.bind_port = bind_port
+        self.bind_address = get_grpc_bind_address() if bind_address is None else bind_address
+        self.max_workers = get_grpc_max_workers() if max_workers is None else max_workers
+        self.grace_period = get_grpc_grace_period() if grace_period is None else grace_period
+        self.enable_health_servicer = enable_health_servicer
+        self.endpoint = None
+        self.health_servicer = None
+        self.pool = None
+        self.server = None
+
+    def install_servicers(self):
+        pass
+
+    def start(self):
+        self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port))
+        self.logger.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
+            str(self.endpoint), str(self.max_workers)))
+
+        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
+        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))
+
+        self.install_servicers()
+
+        if self.enable_health_servicer:
+            self.health_servicer = HealthServicer(
+                experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
+            add_HealthServicer_to_server(self.health_servicer, self.server)
+
+        self.bind_port = self.server.add_insecure_port(self.endpoint)
+        self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port))
+        self.logger.info('Listening on {:s}...'.format(str(self.endpoint)))
+        self.server.start()
+        if self.enable_health_servicer:
+            self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) # pylint: disable=maybe-no-member
+
+        self.logger.debug('Service started')
+
+    def stop(self):
+        self.logger.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period)))
+        if self.enable_health_servicer:
+            self.health_servicer.enter_graceful_shutdown()
+        self.server.stop(self.grace_period)
+        self.logger.debug('Service stopped')
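Concrete services now only choose a port and register servicers; the worker pool, health servicer, port binding, and graceful shutdown live in the base class (the `ComputeService` rewrite below follows this pattern). A minimal subclass sketch with a hypothetical servicer:

```python
# Sketch: the intended subclassing pattern for GenericGrpcService.
from common.tools.service.GenericGrpcService import GenericGrpcService

class MyGrpcService(GenericGrpcService):             # hypothetical service
    def __init__(self, cls_name : str = __name__) -> None:
        super().__init__(50051, cls_name=cls_name)   # hypothetical bind port

    def install_servicers(self):
        # Invoked by start() once self.server exists, e.g.:
        # add_MyServiceServicer_to_server(self.my_servicer, self.server)
        pass

service = MyGrpcService()
service.start()   # binds (port 0 picks a free one) and sets health to SERVING
# ...
service.stop()    # graceful shutdown honoring grace_period
```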
diff --git a/src/common/tools/service/GenericRestServer.py b/src/common/tools/service/GenericRestServer.py
new file mode 100644
index 0000000000000000000000000000000000000000..4325fe1dbc0169665a1281b27e6993670add337c
--- /dev/null
+++ b/src/common/tools/service/GenericRestServer.py
@@ -0,0 +1,59 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools, logging, threading, time
+from typing import Optional, Union
+from flask import Flask, request
+from flask_restful import Api, Resource
+from werkzeug.serving import make_server
+from common.Settings import get_http_bind_address
+
+logging.getLogger('werkzeug').setLevel(logging.WARNING)
+
+
+def log_request(logger, response):
+    timestamp = time.strftime('[%Y-%b-%d %H:%M]')
+    logger.info('%s %s %s %s %s', timestamp, request.remote_addr, request.method, request.full_path, response.status)
+    return response
+
+class GenericRestServer(threading.Thread):
+    def __init__(
+        self, bind_port : Union[str, int], base_url : str, bind_address : Optional[str] = None,
+        cls_name : str = __name__
+    ) -> None:
+        threading.Thread.__init__(self, daemon=True)
+        self.logger = logging.getLogger(cls_name)
+        self.bind_port = bind_port
+        self.base_url = base_url
+        self.bind_address = get_http_bind_address() if bind_address is None else bind_address
+        self.endpoint = 'http://{:s}:{:s}{:s}'.format(str(self.bind_address), str(self.bind_port), str(self.base_url))
+        self.srv = None
+        self.ctx = None
+        self.app = Flask(__name__)
+        self.app.after_request(functools.partial(log_request, self.logger))
+        self.api = Api(self.app, prefix=self.base_url)
+
+    def add_resource(self, resource : Resource, *urls, **kwargs):
+        self.api.add_resource(resource, *urls, **kwargs)
+
+    def run(self):
+        self.srv = make_server(self.bind_address, self.bind_port, self.app, threaded=True)
+        self.ctx = self.app.app_context()
+        self.ctx.push()
+
+        self.logger.info('Listening on {:s}...'.format(str(self.endpoint)))
+        self.srv.serve_forever()
+
+    def shutdown(self):
+        self.srv.shutdown()
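`GenericRestServer` is a daemon thread wrapping Flask plus flask_restful, including the per-request logging previously hard-coded in compute's `RestServer`. A usage sketch with a hypothetical resource:

```python
# Sketch: registering a resource on GenericRestServer and running it as a thread.
from flask_restful import Resource
from common.tools.service.GenericRestServer import GenericRestServer

class Healthz(Resource):                   # hypothetical resource
    def get(self): return {'status': 'ok'}

server = GenericRestServer(8080, '/api', cls_name=__name__)
server.add_resource(Healthz, '/healthz')
server.start()     # Thread.start() -> run() -> werkzeug serve_forever()
# ...
server.shutdown()
server.join()
```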
diff --git a/src/common/tools/service/__init__.py b/src/common/tools/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/common/tools/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/compute/Config.py b/src/compute/Config.py
index c568be4476326be0080aad9f88be0183d82ef833..341ae77f506ce1d2488c3d494262ee26ca6dd7eb 100644
--- a/src/compute/Config.py
+++ b/src/compute/Config.py
@@ -12,23 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
 from werkzeug.security import generate_password_hash
 
-# General settings
-LOG_LEVEL = logging.WARNING
-
-# gRPC settings
-GRPC_SERVICE_PORT = 9090
-GRPC_MAX_WORKERS  = 10
-GRPC_GRACE_PERIOD = 60
-
-# REST-API settings
-RESTAPI_SERVICE_PORT = 8080
-RESTAPI_BASE_URL = '/restconf/data'
+# REST-API users
 RESTAPI_USERS = {   # TODO: implement a database of credentials and permissions
     'admin': generate_password_hash('admin'),
 }
-
-# Prometheus settings
-METRICS_PORT = 9192
diff --git a/src/compute/client/ComputeClient.py b/src/compute/client/ComputeClient.py
index ac85500294f0288e7d0e86f77cd7c415f28ef5e5..5c2cfa8b68406e463467ab3a565b6aae6de56e99 100644
--- a/src/compute/client/ComputeClient.py
+++ b/src/compute/client/ComputeClient.py
@@ -13,7 +13,10 @@
 # limitations under the License.
 
 import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
 from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from compute.proto.compute_pb2_grpc import ComputeServiceStub
 from compute.proto.context_pb2 import (
     AuthenticationResult, Empty, Service, ServiceId, ServiceIdList, ServiceStatus, TeraFlowController)
@@ -24,8 +27,10 @@ DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
 RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
 
 class ComputeClient:
-    def __init__(self, address, port):
-        self.endpoint = '{:s}:{:s}'.format(str(address), str(port))
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.COMPUTE)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.COMPUTE)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
         LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
         self.channel = None
         self.stub = None
@@ -37,55 +42,55 @@ class ComputeClient:
         self.stub = ComputeServiceStub(self.channel)
 
     def close(self):
-        if(self.channel is not None): self.channel.close()
+        if self.channel is not None: self.channel.close()
         self.channel = None
         self.stub = None
 
     @RETRY_DECORATOR
     def CheckCredentials(self, request : TeraFlowController) -> AuthenticationResult:
-        LOGGER.debug('CheckCredentials request: {:s}'.format(str(request)))
+        LOGGER.debug('CheckCredentials request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.CheckCredentials(request)
-        LOGGER.debug('CheckCredentials result: {:s}'.format(str(response)))
+        LOGGER.debug('CheckCredentials result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
     def GetConnectivityServiceStatus(self, request : ServiceId) -> ServiceStatus:
-        LOGGER.debug('GetConnectivityServiceStatus request: {:s}'.format(str(request)))
+        LOGGER.debug('GetConnectivityServiceStatus request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.GetConnectivityServiceStatus(request)
-        LOGGER.debug('GetConnectivityServiceStatus result: {:s}'.format(str(response)))
+        LOGGER.debug('GetConnectivityServiceStatus result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
     def CreateConnectivityService(self, request : Service) -> ServiceId:
-        LOGGER.debug('CreateConnectivityService request: {:s}'.format(str(request)))
+        LOGGER.debug('CreateConnectivityService request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.CreateConnectivityService(request)
-        LOGGER.debug('CreateConnectivityService result: {:s}'.format(str(response)))
+        LOGGER.debug('CreateConnectivityService result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
     def EditConnectivityService(self, request : Service) -> ServiceId:
-        LOGGER.debug('EditConnectivityService request: {:s}'.format(str(request)))
+        LOGGER.debug('EditConnectivityService request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.EditConnectivityService(request)
-        LOGGER.debug('EditConnectivityService result: {:s}'.format(str(response)))
+        LOGGER.debug('EditConnectivityService result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
     def DeleteConnectivityService(self, request : Service) -> Empty:
-        LOGGER.debug('DeleteConnectivityService request: {:s}'.format(str(request)))
+        LOGGER.debug('DeleteConnectivityService request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.DeleteConnectivityService(request)
-        LOGGER.debug('DeleteConnectivityService result: {:s}'.format(str(response)))
+        LOGGER.debug('DeleteConnectivityService result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
     def GetAllActiveConnectivityServices(self, request : Empty) -> ServiceIdList:
-        LOGGER.debug('GetAllActiveConnectivityServices request: {:s}'.format(str(request)))
+        LOGGER.debug('GetAllActiveConnectivityServices request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.GetAllActiveConnectivityServices(request)
-        LOGGER.debug('GetAllActiveConnectivityServices result: {:s}'.format(str(response)))
+        LOGGER.debug('GetAllActiveConnectivityServices result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
     def ClearAllConnectivityServices(self, request : Empty) -> Empty:
-        LOGGER.debug('ClearAllConnectivityServices request: {:s}'.format(str(request)))
+        LOGGER.debug('ClearAllConnectivityServices request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.ClearAllConnectivityServices(request)
-        LOGGER.debug('ClearAllConnectivityServices result: {:s}'.format(str(response)))
+        LOGGER.debug('ClearAllConnectivityServices result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
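Clients can now be built without arguments: the endpoint resolves from `COMPUTESERVICE_SERVICE_HOST` / `COMPUTESERVICE_SERVICE_PORT_GRPC`, defaulting to `computeservice:9090`, while explicit arguments still take precedence. A sketch:

```python
# Sketch: endpoint resolution in the refactored ComputeClient.
from compute.client.ComputeClient import ComputeClient

client = ComputeClient()        # env vars, else defaults: 'computeservice:9090'
client.connect()
# ... issue RPCs, e.g. client.GetAllActiveConnectivityServices(Empty()) ...
client.close()

local_client = ComputeClient(host='127.0.0.1', port=9090)  # explicit override
```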
diff --git a/src/compute/service/ComputeService.py b/src/compute/service/ComputeService.py
index 1f523793d28526bdce1dd50cba7677aa04481171..f8476e102fcfbdd12480f8c40a797d6c84d70982 100644
--- a/src/compute/service/ComputeService.py
+++ b/src/compute/service/ComputeService.py
@@ -12,56 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging
-from concurrent import futures
-from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
-from grpc_health.v1.health_pb2 import HealthCheckResponse
-from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
 from compute.proto.compute_pb2_grpc import add_ComputeServiceServicer_to_server
 from compute.service.ComputeServiceServicerImpl import ComputeServiceServicerImpl
-from compute.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
-
-BIND_ADDRESS = '0.0.0.0'
-LOGGER = logging.getLogger(__name__)
-
-class ComputeService:
-    def __init__(self, address=BIND_ADDRESS, port=GRPC_SERVICE_PORT, max_workers=GRPC_MAX_WORKERS,
-                 grace_period=GRPC_GRACE_PERIOD):
-        self.address = address
-        self.port = port
-        self.endpoint = None
-        self.max_workers = max_workers
-        self.grace_period = grace_period
-        self.compute_servicer = None
-        self.health_servicer = None
-        self.pool = None
-        self.server = None
-
-    def start(self):
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(self.port))
-        LOGGER.debug('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
-            str(self.endpoint), str(self.max_workers)))
-
-        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
-        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))
 
+class ComputeService(GenericGrpcService):
+    def __init__(self, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.COMPUTE)
+        super().__init__(port, cls_name=cls_name)
         self.compute_servicer = ComputeServiceServicerImpl()
-        add_ComputeServiceServicer_to_server(self.compute_servicer, self.server)
 
-        self.health_servicer = HealthServicer(
-            experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
-        add_HealthServicer_to_server(self.health_servicer, self.server)
-
-        port = self.server.add_insecure_port(self.endpoint)
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(port))
-        LOGGER.info('Listening on {:s}...'.format(str(self.endpoint)))
-        self.server.start()
-        self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) # pylint: disable=maybe-no-member
-
-        LOGGER.debug('Service started')
-
-    def stop(self):
-        LOGGER.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period)))
-        self.health_servicer.enter_graceful_shutdown()
-        self.server.stop(self.grace_period)
-        LOGGER.debug('Service stopped')
+    def install_servicers(self):
+        add_ComputeServiceServicer_to_server(self.compute_servicer, self.server)
diff --git a/src/compute/service/__main__.py b/src/compute/service/__main__.py
index cf6f8241010d26d8aff9718b8bc531c2f7f43d22..345b2fdd6950ecda802e8bd1c86e1421b5c60d84 100644
--- a/src/compute/service/__main__.py
+++ b/src/compute/service/__main__.py
@@ -14,10 +14,10 @@
 
 import logging, signal, sys, threading
 from prometheus_client import start_http_server
-from common.Settings import get_setting, wait_for_environment_variables
-from compute.Config import (
-    GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD, LOG_LEVEL, RESTAPI_SERVICE_PORT, RESTAPI_BASE_URL,
-    METRICS_PORT)
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+    wait_for_environment_variables)
 from .ComputeService import ComputeService
 from .rest_server.RestServer import RestServer
 from .rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn
@@ -32,20 +32,13 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
 def main():
     global LOGGER # pylint: disable=global-statement
 
-    grpc_service_port    = get_setting('COMPUTESERVICE_SERVICE_PORT_GRPC', default=GRPC_SERVICE_PORT   )
-    max_workers          = get_setting('MAX_WORKERS',                      default=GRPC_MAX_WORKERS    )
-    grace_period         = get_setting('GRACE_PERIOD',                     default=GRPC_GRACE_PERIOD   )
-    log_level            = get_setting('LOG_LEVEL',                        default=LOG_LEVEL           )
-    restapi_service_port = get_setting('RESTAPI_SERVICE_PORT',             default=RESTAPI_SERVICE_PORT)
-    restapi_base_url     = get_setting('RESTAPI_BASE_URL',                 default=RESTAPI_BASE_URL    )
-    metrics_port         = get_setting('METRICS_PORT',                     default=METRICS_PORT        )
-
+    log_level = get_log_level()
     logging.basicConfig(level=log_level)
     LOGGER = logging.getLogger(__name__)
 
     wait_for_environment_variables([
-        'CONTEXTSERVICE_SERVICE_HOST', 'CONTEXTSERVICE_SERVICE_PORT_GRPC',
-        'SERVICESERVICE_SERVICE_HOST', 'SERVICESERVICE_SERVICE_PORT_GRPC'
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
     signal.signal(signal.SIGINT,  signal_handler)
@@ -54,13 +47,14 @@ def main():
     LOGGER.info('Starting...')
 
     # Start metrics server
+    metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
     # Starting compute service
-    grpc_service = ComputeService(port=grpc_service_port, max_workers=max_workers, grace_period=grace_period)
+    grpc_service = ComputeService()
     grpc_service.start()
 
-    rest_server = RestServer(port=restapi_service_port, base_url=restapi_base_url)
+    rest_server = RestServer()
     register_ietf_l2vpn(rest_server)
     rest_server.start()
 
diff --git a/src/compute/service/rest_server/RestServer.py b/src/compute/service/rest_server/RestServer.py
index 26055f8dfa319ae715e96241a13860fd8283a5aa..d9b6cd9157a77f61f6e3349de690e3314a2b41d9 100644
--- a/src/compute/service/rest_server/RestServer.py
+++ b/src/compute/service/rest_server/RestServer.py
@@ -12,45 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, threading, time
-from flask import Flask, request
-from flask_restful import Api, Resource
-from werkzeug.serving import make_server
-from compute.Config import RESTAPI_BASE_URL, RESTAPI_SERVICE_PORT
-
-logging.getLogger('werkzeug').setLevel(logging.WARNING)
-
-BIND_ADDRESS = '0.0.0.0'
-LOGGER = logging.getLogger(__name__)
-
-def log_request(response):
-    timestamp = time.strftime('[%Y-%b-%d %H:%M]')
-    LOGGER.info('%s %s %s %s %s', timestamp, request.remote_addr, request.method, request.full_path, response.status)
-    return response
-
-class RestServer(threading.Thread):
-    def __init__(self, host=BIND_ADDRESS, port=RESTAPI_SERVICE_PORT, base_url=RESTAPI_BASE_URL):
-        threading.Thread.__init__(self, daemon=True)
-        self.host = host
-        self.port = port
-        self.base_url = base_url
-        self.srv = None
-        self.ctx = None
-        self.app = Flask(__name__)
-        self.app.after_request(log_request)
-        self.api = Api(self.app, prefix=self.base_url)
-
-    def add_resource(self, resource : Resource, *urls, **kwargs):
-        self.api.add_resource(resource, *urls, **kwargs)
-
-    def run(self):
-        self.srv = make_server(self.host, self.port, self.app, threaded=True)
-        self.ctx = self.app.app_context()
-        self.ctx.push()
-
-        endpoint = 'http://{:s}:{:s}{:s}'.format(str(self.host), str(self.port), str(self.base_url))
-        LOGGER.info('Listening on {:s}...'.format(str(endpoint)))
-        self.srv.serve_forever()
-
-    def shutdown(self):
-        self.srv.shutdown()
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_baseurl_http, get_service_port_http
+from common.tools.service.GenericRestServer import GenericRestServer
+
+class RestServer(GenericRestServer):
+    def __init__(self, cls_name: str = __name__) -> None:
+        bind_port = get_service_port_http(ServiceNameEnum.COMPUTE)
+        base_url = get_service_baseurl_http(ServiceNameEnum.COMPUTE)
+        super().__init__(bind_port, base_url, cls_name=cls_name)
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
index 27489410f5f0d00b74042f0f4b9da5115b82e782..17ae49478f26a2884d54f95607aafc27c809425d 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
@@ -17,7 +17,6 @@ from flask import request
 from flask.json import jsonify
 from flask_restful import Resource
 from common.Constants import DEFAULT_CONTEXT_UUID
-from common.Settings import get_setting
 from context.client.ContextClient import ContextClient
 from context.proto.context_pb2 import ServiceId, SliceStatusEnum
 from service.client.ServiceClient import ServiceClient
@@ -29,22 +28,16 @@ from .tools.HttpStatusCodes import HTTP_GATEWAYTIMEOUT, HTTP_NOCONTENT, HTTP_OK,
 LOGGER = logging.getLogger(__name__)
 
 class L2VPN_Service(Resource):
-    def __init__(self) -> None:
-        super().__init__()
-        self.context_client = ContextClient(
-            get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
-        self.service_client = ServiceClient(
-            get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC'))
-
     @HTTP_AUTH.login_required
     def get(self, vpn_id : str):
         LOGGER.debug('VPN_Id: {:s}'.format(str(vpn_id)))
         LOGGER.debug('Request: {:s}'.format(str(request)))
 
         response = jsonify({})
-
         try:
-            target = get_service(self.context_client, vpn_id)
+            context_client = ContextClient()
+
+            target = get_service(context_client, vpn_id)
             if target is not None:
                 if target.service_id.service_uuid.uuid != vpn_id: # pylint: disable=no-member
                     raise Exception('Service retrieval failed. Wrong Service Id was returned')
@@ -53,7 +46,7 @@ class L2VPN_Service(Resource):
                 response.status_code = HTTP_OK if service_status == service_ready_status else HTTP_GATEWAYTIMEOUT
                 return response
 
-            target = get_slice(self.context_client, vpn_id)
+            target = get_slice(context_client, vpn_id)
             if target is not None:
                 if target.slice_id.slice_uuid.uuid != vpn_id: # pylint: disable=no-member
                     raise Exception('Slice retrieval failed. Wrong Slice Id was returned')
@@ -80,7 +73,8 @@ class L2VPN_Service(Resource):
         service_id_request.service_uuid.uuid = vpn_id
 
         try:
-            self.service_client.DeleteService(service_id_request)
+            service_client = ServiceClient()
+            service_client.DeleteService(service_id_request)
             response = jsonify({})
             response.status_code = HTTP_NOCONTENT
         except Exception as e: # pylint: disable=broad-except
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
index 6d39cfe2db2ccba5ca75fe6861599b8ef178a038..e0de1b7321d5618d3a7d33af4a59b47b12482e3a 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
@@ -19,7 +19,6 @@ from flask.json import jsonify
 from flask_restful import Resource
 from werkzeug.exceptions import UnsupportedMediaType
 from common.Constants import DEFAULT_CONTEXT_UUID
-from common.Settings import get_setting
 from service.client.ServiceClient import ServiceClient
 from service.proto.context_pb2 import Service, ServiceStatusEnum, ServiceTypeEnum
 from slice.client.SliceClient import SliceClient
@@ -32,13 +31,6 @@ from .tools.Validator import validate_message
 LOGGER = logging.getLogger(__name__)
 
 class L2VPN_Services(Resource):
-    def __init__(self) -> None:
-        super().__init__()
-        self.service_client = ServiceClient(
-            get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC'))
-        self.slice_client = SliceClient(
-            get_setting('SLICESERVICE_SERVICE_HOST'), get_setting('SLICESERVICE_SERVICE_PORT_GRPC'))
-
     @HTTP_AUTH.login_required
     def get(self):
         return {}
@@ -62,7 +54,8 @@ class L2VPN_Services(Resource):
                     service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM
                     service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
 
-                    service_reply = self.service_client.CreateService(service_request)
+                    service_client = ServiceClient()
+                    service_reply = service_client.CreateService(service_request)
                     if service_reply != service_request.service_id: # pylint: disable=no-member
                         raise Exception('Service creation failed. Wrong Service Id was returned')
                 elif vpn_service_type == 'vpls':
@@ -72,7 +65,8 @@ class L2VPN_Services(Resource):
                     slice_request.slice_id.slice_uuid.uuid = vpn_service['vpn-id']
                     slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED
 
-                    slice_reply = self.slice_client.CreateSlice(slice_request)
+                    slice_client = SliceClient()
+                    slice_reply = slice_client.CreateSlice(slice_request)
                     if slice_reply != slice_request.slice_id: # pylint: disable=no-member
                         raise Exception('Slice creation failed. Wrong Slice Id was returned')
 
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index 2c0245b9ae790dd91509f51d821530c589fedf9c..3e7928067daac258431cc8c5b5ccac7257e1cbb8 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -204,25 +204,20 @@ def process_list_site_network_access(
     return response
 
 class L2VPN_SiteNetworkAccesses(Resource):
-    def __init__(self) -> None:
-        super().__init__()
-        self.context_client = ContextClient(
-            get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
-        self.service_client = ServiceClient(
-            get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC'))
-        self.slice_client = SliceClient(
-            get_setting('SLICESERVICE_SERVICE_HOST'), get_setting('SLICESERVICE_SERVICE_PORT_GRPC'))
-
     @HTTP_AUTH.login_required
     def post(self, site_id : str):
         if not request.is_json: raise UnsupportedMediaType('JSON payload is required')
         LOGGER.debug('Site_Id: {:s}'.format(str(site_id)))
-        return process_list_site_network_access(
-            self.context_client, self.service_client, self.slice_client, request.json)
+        context_client = ContextClient()
+        service_client = ServiceClient()
+        slice_client = SliceClient()
+        return process_list_site_network_access(context_client, service_client, slice_client, request.json)
 
     @HTTP_AUTH.login_required
     def put(self, site_id : str):
         if not request.is_json: raise UnsupportedMediaType('JSON payload is required')
         LOGGER.debug('Site_Id: {:s}'.format(str(site_id)))
-        return process_list_site_network_access(
-            self.context_client, self.service_client, self.slice_client, request.json)
+        context_client = ContextClient()
+        service_client = ServiceClient()
+        slice_client = SliceClient()
+        return process_list_site_network_access(context_client, service_client, slice_client, request.json)
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/__init__.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/__init__.py
index 79be6b7433a4e448febb37777cd0b310d5bf132d..c8b23bcee09bb7f16d75bde02e6f3a7f4ca66b4d 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/__init__.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/__init__.py
@@ -21,7 +21,7 @@ from .L2VPN_Services import L2VPN_Services
 from .L2VPN_Service import L2VPN_Service
 from .L2VPN_SiteNetworkAccesses import L2VPN_SiteNetworkAccesses
 
-URL_PREFIX      = '/ietf-l2vpn-svc:l2vpn-svc'
+URL_PREFIX = '/ietf-l2vpn-svc:l2vpn-svc'
 
 def _add_resource(rest_server : RestServer, resource : Resource, *urls, **kwargs):
     urls = [(URL_PREFIX + url) for url in urls]
diff --git a/src/compute/tests/MockService_Dependencies.py b/src/compute/tests/MockService_Dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b0e4b3cc49d39a245b76ab5c4099f682ca4c92c
--- /dev/null
+++ b/src/compute/tests/MockService_Dependencies.py
@@ -0,0 +1,58 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from typing import Union
+from common.Constants import ServiceNameEnum
+from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name
+from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.tests.MockServicerImpl_Service import MockServicerImpl_Service
+from common.tests.MockServicerImpl_Slice import MockServicerImpl_Slice
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from context.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+from service.proto.service_pb2_grpc import add_ServiceServiceServicer_to_server
+from slice.proto.slice_pb2_grpc import add_SliceServiceServicer_to_server
+
+LOCAL_HOST = '127.0.0.1'
+
+SERVICE_CONTEXT = ServiceNameEnum.CONTEXT
+SERVICE_SERVICE = ServiceNameEnum.SERVICE
+SERVICE_SLICE = ServiceNameEnum.SLICE
+
+class MockService_Dependencies(GenericGrpcService):
+    # Mock Service implementing Context, Service and Slice to simplify unitary tests of Compute
+
+    def __init__(self, bind_port: Union[str, int]) -> None:
+        super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')
+
+    # pylint: disable=attribute-defined-outside-init
+    def install_servicers(self):
+        self.context_servicer = MockServicerImpl_Context()
+        add_ContextServiceServicer_to_server(self.context_servicer, self.server)
+
+        self.service_servicer = MockServicerImpl_Service()
+        add_ServiceServiceServicer_to_server(self.service_servicer, self.server)
+
+        self.slice_servicer = MockServicerImpl_Slice()
+        add_SliceServiceServicer_to_server(self.slice_servicer, self.server)
+
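+    # Export CONTEXTSERVICE_/SERVICESERVICE_/SLICESERVICE_ SERVICE_HOST and _PORT_GRPC so that
+    # clients in the code under test resolve their endpoints to this mock.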
+    def configure_env_vars(self):
+        os.environ[get_env_var_name(SERVICE_CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
+        os.environ[get_env_var_name(SERVICE_CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
+
+        os.environ[get_env_var_name(SERVICE_SERVICE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
+        os.environ[get_env_var_name(SERVICE_SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
+
+        os.environ[get_env_var_name(SERVICE_SLICE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
+        os.environ[get_env_var_name(SERVICE_SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
diff --git a/src/compute/tests/PrepareTestScenario.py b/src/compute/tests/PrepareTestScenario.py
new file mode 100644
index 0000000000000000000000000000000000000000..d534a4a28280c80964096a9cb7291c498ebe6b93
--- /dev/null
+++ b/src/compute/tests/PrepareTestScenario.py
@@ -0,0 +1,52 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, pytest, time
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, get_service_port_http)
+from compute.service.rest_server.RestServer import RestServer
+from compute.service.rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn
+from compute.tests.MockService_Dependencies import MockService_Dependencies
+from .mock_osm.MockOSM import MockOSM
+from .Constants import WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD
+
+LOCAL_HOST = '127.0.0.1'
+MOCKSERVICE_PORT = 10000
+COMPUTE_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_http(ServiceNameEnum.COMPUTE)    # avoid privileged ports
+os.environ[get_env_var_name(ServiceNameEnum.COMPUTE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.COMPUTE, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(COMPUTE_SERVICE_PORT)
+
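+# Session-scoped fixture chain: mock_service -> compute_service_rest -> osm_wim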
+@pytest.fixture(scope='session')
+def mock_service():
+    _service = MockService_Dependencies(MOCKSERVICE_PORT)
+    _service.configure_env_vars()
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def compute_service_rest(mock_service):  # pylint: disable=redefined-outer-name
+    _rest_server = RestServer()
+    register_ietf_l2vpn(_rest_server)
+    _rest_server.start()
+    time.sleep(1) # give the server some time to start
+    yield _rest_server
+    _rest_server.shutdown()
+    _rest_server.join()
+
+@pytest.fixture(scope='session')
+def osm_wim(compute_service_rest): # pylint: disable=redefined-outer-name
+    wim_url = 'http://{:s}:{:d}'.format(LOCAL_HOST, COMPUTE_SERVICE_PORT)
+    return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
diff --git a/src/compute/tests/test_unitary.py b/src/compute/tests/test_unitary.py
index 1fbc74ecc7fc49129b789ba99af7276a94bd662e..05c45c1b3554d21084a4a20cac6856b049fe7ca3 100644
--- a/src/compute/tests/test_unitary.py
+++ b/src/compute/tests/test_unitary.py
@@ -12,75 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, os, pytest, time
-from common.tests.MockService import MockService
-from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
-from common.tests.MockServicerImpl_Service import MockServicerImpl_Service
-from common.tests.MockServicerImpl_Slice import MockServicerImpl_Slice
-from compute.Config import RESTAPI_SERVICE_PORT, RESTAPI_BASE_URL
-from compute.service.rest_server.RestServer import RestServer
-from context.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
-from service.proto.service_pb2_grpc import add_ServiceServiceServicer_to_server
-from slice.proto.slice_pb2_grpc import add_SliceServiceServicer_to_server
+import logging
 from .mock_osm.MockOSM import MockOSM
-from .Constants import (
-    SERVICE_CONNECTION_POINTS_1, SERVICE_CONNECTION_POINTS_2, SERVICE_TYPE, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
+from .Constants import SERVICE_CONNECTION_POINTS_1, SERVICE_CONNECTION_POINTS_2, SERVICE_TYPE
+from .PrepareTestScenario import ( # pylint: disable=unused-import
+    # be careful: order of symbols matters; fixtures must be imported in dependency order!
+    mock_service, compute_service_rest, osm_wim)
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-LOCALHOST = '127.0.0.1'
-MOCKSERVER_GRPC_PORT = 10000
-COMPUTE_RESTAPI_PORT = 10000 + RESTAPI_SERVICE_PORT # avoid privileged ports
-
-class MockService_ContextService(MockService):
-    # Mock Server implementing Context and Service to simplify unitary tests of Compute
-
-    def __init__(self, cls_name='MockService_Service'):
-        super().__init__(LOCALHOST, MOCKSERVER_GRPC_PORT, cls_name=cls_name)
-
-    # pylint: disable=attribute-defined-outside-init
-    def install_servicers(self):
-        self.context_servicer = MockServicerImpl_Context()
-        add_ContextServiceServicer_to_server(self.context_servicer, self.server)
-        self.service_servicer = MockServicerImpl_Service()
-        add_ServiceServiceServicer_to_server(self.service_servicer, self.server)
-        self.slice_servicer = MockServicerImpl_Slice()
-        add_SliceServiceServicer_to_server(self.slice_servicer, self.server)
-
-os.environ['CONTEXTSERVICE_SERVICE_HOST'] = LOCALHOST
-os.environ['CONTEXTSERVICE_SERVICE_PORT_GRPC'] = str(MOCKSERVER_GRPC_PORT)
-os.environ['SERVICESERVICE_SERVICE_HOST'] = LOCALHOST
-os.environ['SERVICESERVICE_SERVICE_PORT_GRPC'] = str(MOCKSERVER_GRPC_PORT)
-os.environ['SLICESERVICE_SERVICE_HOST'] = LOCALHOST
-os.environ['SLICESERVICE_SERVICE_PORT_GRPC'] = str(MOCKSERVER_GRPC_PORT)
-
-# NBI Plugin IETF L2VPN requires environment variables CONTEXTSERVICE_SERVICE_HOST, CONTEXTSERVICE_SERVICE_PORT_GRPC,
-# SERVICESERVICE_SERVICE_HOST, and SERVICESERVICE_SERVICE_PORT_GRPC to work properly.
-# pylint: disable=wrong-import-position,ungrouped-imports
-from compute.service.rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn
-
-@pytest.fixture(scope='session')
-def mockservice():
-    _service = MockService_ContextService()
-    _service.start()
-    yield _service
-    _service.stop()
-
-@pytest.fixture(scope='session')
-def compute_service_rest(mockservice):  # pylint: disable=redefined-outer-name
-    _rest_server = RestServer(port=COMPUTE_RESTAPI_PORT, base_url=RESTAPI_BASE_URL)
-    register_ietf_l2vpn(_rest_server)
-    _rest_server.start()
-    time.sleep(1) # bring time for the server to start
-    yield _rest_server
-    _rest_server.shutdown()
-    _rest_server.join()
-
-@pytest.fixture(scope='session')
-def osm_wim(compute_service_rest): # pylint: disable=redefined-outer-name
-    wim_url = 'http://{:s}:{:d}'.format(LOCALHOST, COMPUTE_RESTAPI_PORT)
-    return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
 
 def test_compute_create_connectivity_service_rest_api(osm_wim : MockOSM): # pylint: disable=redefined-outer-name
     osm_wim.create_connectivity_service(SERVICE_TYPE, SERVICE_CONNECTION_POINTS_1)
diff --git a/src/context/Config.py b/src/context/Config.py
index 328610fc81561f60d97b8ef3080930c4affce20e..6f5d1dc0b347dc5db27a2cfae973a4e5bdf7b4cc 100644
--- a/src/context/Config.py
+++ b/src/context/Config.py
@@ -12,22 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-
-# General settings
-LOG_LEVEL = logging.INFO
-
-# gRPC settings
-GRPC_SERVICE_PORT = 1010
-GRPC_MAX_WORKERS  = 200 # multiple clients might keep connections alive for Get*Events() RPC methods
-GRPC_GRACE_PERIOD = 60
-
-# REST-API settings
-RESTAPI_SERVICE_PORT = 8080
-RESTAPI_BASE_URL = '/api'
-
-# Prometheus settings
-METRICS_PORT = 9192
-
 # Autopopulate the component with fake data for testing purposes?
 POPULATE_FAKE_DATA = False
diff --git a/src/context/client/ContextClient.py b/src/context/client/ContextClient.py
index 3206e4a366ef2f6cb7d3aeb366b287572b8d49da..34214fac00d03f5d7595bf118b35026642ba9426 100644
--- a/src/context/client/ContextClient.py
+++ b/src/context/client/ContextClient.py
@@ -12,8 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Iterator
 import grpc, logging
+from typing import Iterator
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
 from common.tools.client.RetryDecorator import retry, delay_exponential
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.proto.context_pb2 import (
@@ -29,9 +31,11 @@ DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
 RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
 
 class ContextClient:
-    def __init__(self, address, port):
-        self.endpoint = '{:s}:{:s}'.format(str(address), str(port))
-        LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint))
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.CONTEXT)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.CONTEXT)
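+        # when omitted, host/port are resolved via common.Settings (env vars with component defaults)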
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
         self.channel = None
         self.stub = None
         self.connect()
diff --git a/src/context/service/Populate.py b/src/context/service/Populate.py
index f4630182d864a891095caa6689dde9f656ea1c0e..ace630900d82fc36a82d290f12e466018dc82587 100644
--- a/src/context/service/Populate.py
+++ b/src/context/service/Populate.py
@@ -20,8 +20,8 @@ from context.tests.Objects import (
     LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R3, LINK_R1_R3_ID, LINK_R2_R3, LINK_R2_R3_ID, SERVICE_R1_R2, SERVICE_R1_R3,
     SERVICE_R2_R3)
 
-def populate(address, port):
-    client = ContextClient(address=address, port=port)
+def populate(host=None, port=None):
+    client = ContextClient(host=host, port=port)
 
     client.SetContext(Context(**CONTEXT))
     client.SetTopology(Topology(**TOPOLOGY))
diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py
index 180a1f44cb6a37b487e6bce0a13706952ff73bc2..53754caf4f9d2621ed8a6fdfd325d42f77f44a4f 100644
--- a/src/context/service/__main__.py
+++ b/src/context/service/__main__.py
@@ -14,17 +14,15 @@
 
 import logging, signal, sys, threading
 from prometheus_client import start_http_server
-from common.Settings import get_setting
+from common.Settings import get_log_level, get_metrics_port, get_setting
 from common.orm.Database import Database
 from common.orm.Factory import get_database_backend
 from common.message_broker.Factory import get_messagebroker_backend
 from common.message_broker.MessageBroker import MessageBroker
-from context.Config import (
-    GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD, LOG_LEVEL, POPULATE_FAKE_DATA, RESTAPI_SERVICE_PORT,
-    RESTAPI_BASE_URL, METRICS_PORT)
+from context.Config import POPULATE_FAKE_DATA
 from .grpc_server.ContextService import ContextService
 from .rest_server.Resources import RESOURCES
-from .rest_server.Server import Server
+from .rest_server.RestServer import RestServer
 from .Populate import populate
 
 terminate = threading.Event()
@@ -37,16 +35,7 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
 def main():
     global LOGGER # pylint: disable=global-statement
 
-    grpc_service_port    = get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC', default=GRPC_SERVICE_PORT   )
-    max_workers          = get_setting('MAX_WORKERS',                      default=GRPC_MAX_WORKERS    )
-    grace_period         = get_setting('GRACE_PERIOD',                     default=GRPC_GRACE_PERIOD   )
-    log_level            = get_setting('LOG_LEVEL',                        default=LOG_LEVEL           )
-    restapi_service_port = get_setting('CONTEXTSERVICE_SERVICE_PORT_HTTP', default=RESTAPI_SERVICE_PORT)
-    restapi_base_url     = get_setting('RESTAPI_BASE_URL',                 default=RESTAPI_BASE_URL    )
-    metrics_port         = get_setting('METRICS_PORT',                     default=METRICS_PORT        )
-    populate_fake_data   = get_setting('POPULATE_FAKE_DATA',               default=POPULATE_FAKE_DATA  )
-    if isinstance(populate_fake_data, str): populate_fake_data = (populate_fake_data.upper() in {'T', '1', 'TRUE'})
-
+    log_level = get_log_level()
     logging.basicConfig(level=log_level)
     LOGGER = logging.getLogger(__name__)
 
@@ -56,6 +45,7 @@ def main():
     LOGGER.info('Starting...')
 
     # Start metrics server
+    metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
     # Get database instance
@@ -65,18 +55,19 @@ def main():
     messagebroker = MessageBroker(get_messagebroker_backend())
 
     # Starting context service
-    grpc_service = ContextService(
-        database, messagebroker, port=grpc_service_port, max_workers=max_workers, grace_period=grace_period)
+    grpc_service = ContextService(database, messagebroker)
     grpc_service.start()
 
-    rest_server = Server(port=restapi_service_port, base_url=restapi_base_url)
+    rest_server = RestServer()
     for endpoint_name, resource_class, resource_url in RESOURCES:
         rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,))
     rest_server.start()
 
+    populate_fake_data = get_setting('POPULATE_FAKE_DATA', default=POPULATE_FAKE_DATA)
+    if isinstance(populate_fake_data, str): populate_fake_data = (populate_fake_data.upper() in {'T', '1', 'TRUE'})
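+    # (environment variables arrive as strings, hence the explicit truthy-value check above)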
     if populate_fake_data:
         LOGGER.info('Populating fake data...')
-        populate('127.0.0.1', grpc_service_port)
+        populate(host='127.0.0.1', port=grpc_service.bind_port)
         LOGGER.info('Fake Data populated')
 
     # Wait for Ctrl+C or termination signal
diff --git a/src/context/service/grpc_server/ContextService.py b/src/context/service/grpc_server/ContextService.py
index 87ca94a70aa2e1733b8ec443c70a4623d2e0c471..c338b0f0d499e3c9a2a32a8ca77e386333af0456 100644
--- a/src/context/service/grpc_server/ContextService.py
+++ b/src/context/service/grpc_server/ContextService.py
@@ -12,61 +12,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc
-import logging
-from concurrent import futures
-from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
-from grpc_health.v1.health_pb2 import HealthCheckResponse
-from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
-from context.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.message_broker.MessageBroker import MessageBroker
+from common.orm.Database import Database
+from common.tools.service.GenericGrpcService import GenericGrpcService
 from context.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
 from .ContextServiceServicerImpl import ContextServiceServicerImpl
 
-BIND_ADDRESS = '0.0.0.0'
-LOGGER = logging.getLogger(__name__)
+# Custom gRPC settings
+GRPC_MAX_WORKERS = 200 # multiple clients might keep connections alive for Get*Events() RPC methods
 
-class ContextService:
-    def __init__(
-        self, database, messagebroker, address=BIND_ADDRESS, port=GRPC_SERVICE_PORT, max_workers=GRPC_MAX_WORKERS,
-        grace_period=GRPC_GRACE_PERIOD):
+class ContextService(GenericGrpcService):
+    def __init__(self, database : Database, messagebroker : MessageBroker, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.CONTEXT)
+        super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name)
+        self.context_servicer = ContextServiceServicerImpl(database, messagebroker)
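+        # the servicer is created here; install_servicers() attaches it once the gRPC server exists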
 
-        self.database = database
-        self.messagebroker = messagebroker
-        self.address = address
-        self.port = port
-        self.endpoint = None
-        self.max_workers = max_workers
-        self.grace_period = grace_period
-        self.context_servicer = None
-        self.health_servicer = None
-        self.pool = None
-        self.server = None
-
-    def start(self):
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(self.port))
-        LOGGER.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
-            str(self.endpoint), str(self.max_workers)))
-
-        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
-        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))
-
-        self.context_servicer = ContextServiceServicerImpl(self.database, self.messagebroker)
+    def install_servicers(self):
         add_ContextServiceServicer_to_server(self.context_servicer, self.server)
-
-        self.health_servicer = HealthServicer(
-            experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
-        add_HealthServicer_to_server(self.health_servicer, self.server)
-
-        port = self.server.add_insecure_port(self.endpoint)
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(port))
-        LOGGER.info('Listening on {:s}...'.format(str(self.endpoint)))
-        self.server.start()
-        self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) # pylint: disable=maybe-no-member
-
-        LOGGER.debug('Service started')
-
-    def stop(self):
-        LOGGER.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period)))
-        self.health_servicer.enter_graceful_shutdown()
-        self.server.stop(self.grace_period)
-        LOGGER.debug('Service stopped')
diff --git a/src/context/service/rest_server/RestServer.py b/src/context/service/rest_server/RestServer.py
new file mode 100644
index 0000000000000000000000000000000000000000..289e92a3c1b74e207a261b133130a551c3c55918
--- /dev/null
+++ b/src/context/service/rest_server/RestServer.py
@@ -0,0 +1,23 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_baseurl_http, get_service_port_http
+from common.tools.service.GenericRestServer import GenericRestServer
+
+class RestServer(GenericRestServer):
+    def __init__(self, cls_name: str = __name__) -> None:
+        bind_port = get_service_port_http(ServiceNameEnum.CONTEXT)
+        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
+        super().__init__(bind_port, base_url, cls_name=cls_name)
diff --git a/src/context/service/rest_server/Server.py b/src/context/service/rest_server/Server.py
deleted file mode 100644
index ac4888d41bd9a84c57fc2d2f308bde4558787cbc..0000000000000000000000000000000000000000
--- a/src/context/service/rest_server/Server.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, threading
-from flask import Flask
-from flask_restful import Api
-from werkzeug.serving import make_server
-from context.Config import RESTAPI_BASE_URL, RESTAPI_SERVICE_PORT
-
-logging.getLogger('werkzeug').setLevel(logging.WARNING)
-
-BIND_ADDRESS = '0.0.0.0'
-LOGGER = logging.getLogger(__name__)
-
-class Server(threading.Thread):
-    def __init__(self, host=BIND_ADDRESS, port=RESTAPI_SERVICE_PORT, base_url=RESTAPI_BASE_URL):
-        threading.Thread.__init__(self, daemon=True)
-        self.host = host
-        self.port = port
-        self.base_url = base_url
-        self.srv = None
-        self.ctx = None
-        self.app = Flask(__name__)
-        self.api = Api(self.app, prefix=self.base_url)
-
-    def add_resource(self, resource, *urls, **kwargs):
-        self.api.add_resource(resource, *urls, **kwargs)
-
-    def run(self):
-        self.srv = make_server(self.host, self.port, self.app, threaded=True)
-        self.ctx = self.app.app_context()
-        self.ctx.push()
-
-        endpoint = 'http://{:s}:{:s}{:s}'.format(str(self.host), str(self.port), str(self.base_url))
-        LOGGER.info('Listening on {:s}...'.format(str(endpoint)))
-        self.srv.serve_forever()
-
-    def shutdown(self):
-        self.srv.shutdown()
diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py
index 10f44d9ad87a71d5935151f4ae724e9d04b5d5ce..0705477aece433cc7d796921afbd40e6d4df05eb 100644
--- a/src/context/tests/test_unitary.py
+++ b/src/context/tests/test_unitary.py
@@ -15,7 +15,8 @@
 # pylint: disable=too-many-lines
 import copy, grpc, logging, os, pytest, requests, time, urllib
 from typing import Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name,
+    get_service_baseurl_http, get_service_port_grpc, get_service_port_http)
 from common.orm.Database import Database
 from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum
 from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
@@ -25,8 +26,6 @@ from common.type_checkers.Assertions import (
     validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids,
     validate_links, validate_service, validate_service_ids, validate_services, validate_topologies, validate_topology,
     validate_topology_ids)
-from context.Config import (
-    GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD, RESTAPI_SERVICE_PORT, RESTAPI_BASE_URL)
 from context.client.ContextClient import ContextClient
 from context.client.EventsCollector import EventsCollector
 from context.proto.context_pb2 import (
@@ -37,7 +36,7 @@ from context.service.database.Tools import (
     FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher)
 from context.service.grpc_server.ContextService import ContextService
 from context.service.Populate import populate
-from context.service.rest_server.Server import Server as RestServer
+from context.service.rest_server.RestServer import RestServer
 from context.service.rest_server.Resources import RESOURCES
 from .Objects import (
     CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID,
@@ -48,10 +47,15 @@ from .Objects import (
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-GRPC_PORT    = 10000 + GRPC_SERVICE_PORT    # avoid privileged ports
-RESTAPI_PORT = 10000 + RESTAPI_SERVICE_PORT # avoid privileged ports
+LOCAL_HOST = '127.0.0.1'
+GRPC_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.CONTEXT)   # avoid privileged ports
+HTTP_PORT = 10000 + get_service_port_http(ServiceNameEnum.CONTEXT)   # avoid privileged ports
 
-DEFAULT_REDIS_SERVICE_HOST = '127.0.0.1'
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT)
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT)
+
+DEFAULT_REDIS_SERVICE_HOST = LOCAL_HOST
 DEFAULT_REDIS_SERVICE_PORT = 6379
 DEFAULT_REDIS_DATABASE_ID  = 0
 
@@ -78,9 +82,7 @@ def context_db_mb(request) -> Tuple[Database, MessageBroker]:
 
 @pytest.fixture(scope='session')
 def context_service_grpc(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name
-    _service = ContextService(
-        context_db_mb[0], context_db_mb[1], port=GRPC_PORT, max_workers=GRPC_MAX_WORKERS,
-        grace_period=GRPC_GRACE_PERIOD)
+    _service = ContextService(context_db_mb[0], context_db_mb[1])
     _service.start()
     yield _service
     _service.stop()
@@ -88,7 +90,7 @@ def context_service_grpc(context_db_mb : Tuple[Database, MessageBroker]): # pyli
 @pytest.fixture(scope='session')
 def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name
     database = context_db_mb[0]
-    _rest_server = RestServer(port=RESTAPI_PORT, base_url=RESTAPI_BASE_URL)
+    _rest_server = RestServer()
     for endpoint_name, resource_class, resource_url in RESOURCES:
         _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,))
     _rest_server.start()
@@ -99,12 +101,13 @@ def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pyli
 
 @pytest.fixture(scope='session')
 def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name
-    _client = ContextClient(address='127.0.0.1', port=GRPC_PORT)
+    _client = ContextClient()
     yield _client
     _client.close()
 
 def do_rest_request(url : str):
-    request_url = 'http://127.0.0.1:{:s}{:s}{:s}'.format(str(RESTAPI_PORT), str(RESTAPI_BASE_URL), url)
+    base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
+    request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
     LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
     reply = requests.get(request_url)
     LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
@@ -1172,7 +1175,7 @@ def test_rest_populate_database(
     ):
     database = context_db_mb[0]
     database.clear_all()
-    populate('127.0.0.1', GRPC_PORT)
+    populate(LOCAL_HOST, GRPC_PORT)
 
 def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
     reply = do_rest_request('/context_ids')
diff --git a/src/device/.gitlab-ci.yml b/src/device/.gitlab-ci.yml
index 90f00f7a168404dc5196c923e76430a6d029f6c9..4e337f37e17006cee0943dfadce6d933b656c4bf 100644
--- a/src/device/.gitlab-ci.yml
+++ b/src/device/.gitlab-ci.yml
@@ -54,7 +54,7 @@ unit test device:
     - sleep 5
     - docker ps -a
     - docker logs $IMAGE_NAME
-    - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
+    - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary_emulated.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
     - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
diff --git a/src/device/Config.py b/src/device/Config.py
index 415ae7b01ced740a0dc09f215ae71ad553a2672e..70a33251242c51f49140e596b8208a19dd5245f7 100644
--- a/src/device/Config.py
+++ b/src/device/Config.py
@@ -12,21 +12,3 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-
-# General settings
-LOG_LEVEL = logging.WARNING
-
-# gRPC settings
-GRPC_SERVICE_PORT = 2020
-GRPC_MAX_WORKERS  = 10
-GRPC_GRACE_PERIOD = 60
-
-# Prometheus settings
-METRICS_PORT = 9192
-
-# Dependency micro-service connection settings
-CONTEXT_SERVICE_HOST = '127.0.0.1'
-CONTEXT_SERVICE_PORT = 1010
-MONITORING_SERVICE_HOST = '127.0.0.1'
-MONITORING_SERVICE_PORT = 7070
diff --git a/src/device/client/DeviceClient.py b/src/device/client/DeviceClient.py
index 2a9512411a2d9e55e4c5b0fa75d48fc54d810713..7fe54cb23babee7a2f3bc9d21082732d924f5eff 100644
--- a/src/device/client/DeviceClient.py
+++ b/src/device/client/DeviceClient.py
@@ -13,7 +13,10 @@
 # limitations under the License.
 
 import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
 from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from device.proto.context_pb2 import Device, DeviceConfig, DeviceId, Empty
 from device.proto.device_pb2 import MonitoringSettings
 from device.proto.device_pb2_grpc import DeviceServiceStub
@@ -24,8 +27,10 @@ DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
 RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
 
 class DeviceClient:
-    def __init__(self, address, port):
-        self.endpoint = '{:s}:{:s}'.format(str(address), str(port))
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.DEVICE)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.DEVICE)
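+        # when omitted, host/port are resolved via common.Settings (env vars with component defaults)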
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
         LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
         self.channel = None
         self.stub = None
@@ -37,41 +42,41 @@ class DeviceClient:
         self.stub = DeviceServiceStub(self.channel)
 
     def close(self):
-        if(self.channel is not None): self.channel.close()
+        if self.channel is not None: self.channel.close()
         self.channel = None
         self.stub = None
 
     @RETRY_DECORATOR
     def AddDevice(self, request : Device) -> DeviceId:
-        LOGGER.debug('AddDevice request: {:s}'.format(str(request)))
+        LOGGER.debug('AddDevice request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.AddDevice(request)
-        LOGGER.debug('AddDevice result: {:s}'.format(str(response)))
+        LOGGER.debug('AddDevice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
     def ConfigureDevice(self, request : Device) -> DeviceId:
-        LOGGER.debug('ConfigureDevice request: {:s}'.format(str(request)))
+        LOGGER.debug('ConfigureDevice request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.ConfigureDevice(request)
-        LOGGER.debug('ConfigureDevice result: {:s}'.format(str(response)))
+        LOGGER.debug('ConfigureDevice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
     def DeleteDevice(self, request : DeviceId) -> Empty:
-        LOGGER.debug('DeleteDevice request: {:s}'.format(str(request)))
+        LOGGER.debug('DeleteDevice request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.DeleteDevice(request)
-        LOGGER.debug('DeleteDevice result: {:s}'.format(str(response)))
+        LOGGER.debug('DeleteDevice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
     def GetInitialConfig(self, request : DeviceId) -> DeviceConfig:
-        LOGGER.debug('GetInitialConfig request: {:s}'.format(str(request)))
+        LOGGER.debug('GetInitialConfig request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.GetInitialConfig(request)
-        LOGGER.debug('GetInitialConfig result: {:s}'.format(str(response)))
+        LOGGER.debug('GetInitialConfig result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
     def MonitorDeviceKpi(self, request : MonitoringSettings) -> Empty:
-        LOGGER.debug('MonitorDeviceKpi request: {:s}'.format(str(request)))
+        LOGGER.debug('MonitorDeviceKpi request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.MonitorDeviceKpi(request)
-        LOGGER.debug('MonitorDeviceKpi result: {:s}'.format(str(response)))
+        LOGGER.debug('MonitorDeviceKpi result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
diff --git a/src/device/service/DeviceService.py b/src/device/service/DeviceService.py
index bb2cc09535af579a24cf05687d2883d81c7c914b..4f9b032e8a224e89e48daebf52687cc892ca534a 100644
--- a/src/device/service/DeviceService.py
+++ b/src/device/service/DeviceService.py
@@ -12,76 +12,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging
-from concurrent import futures
-from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
-from grpc_health.v1.health_pb2 import HealthCheckResponse
-from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
 from common.orm.backend.BackendEnum import BackendEnum
 from common.orm.Database import Database
 from common.orm.Factory import get_database_backend
-from context.client.ContextClient import ContextClient
-from device.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
+from common.tools.service.GenericGrpcService import GenericGrpcService
 from device.proto.device_pb2_grpc import add_DeviceServiceServicer_to_server
-from monitoring.client.monitoring_client import MonitoringClient
 from .driver_api.DriverInstanceCache import DriverInstanceCache
 from .DeviceServiceServicerImpl import DeviceServiceServicerImpl
 from .MonitoringLoops import MonitoringLoops
 
-BIND_ADDRESS = '0.0.0.0'
-LOGGER = logging.getLogger(__name__)
-
-class DeviceService:
-    def __init__(
-        self, context_client : ContextClient, monitoring_client : MonitoringClient,
-        driver_instance_cache : DriverInstanceCache,
-        address=BIND_ADDRESS, port=GRPC_SERVICE_PORT, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD):
-
-        self.context_client = context_client
-        self.monitoring_client = monitoring_client
-        self.driver_instance_cache = driver_instance_cache
-        self.address = address
-        self.port = port
-        self.endpoint = None
-        self.max_workers = max_workers
-        self.grace_period = grace_period
-        self.device_servicer = None
-        self.health_servicer = None
-        self.pool = None
-        self.server = None
-
-        self.database = Database(get_database_backend(backend=BackendEnum.INMEMORY))
-        self.monitoring_loops = MonitoringLoops(monitoring_client, self.database)
-
-    def start(self):
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(self.port))
-        LOGGER.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
-            str(self.endpoint), str(self.max_workers)))
+class DeviceService(GenericGrpcService):
+    def __init__(self, driver_instance_cache : DriverInstanceCache, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.DEVICE)
+        super().__init__(port, cls_name=cls_name)
+        database = Database(get_database_backend(backend=BackendEnum.INMEMORY))
+        self.monitoring_loops = MonitoringLoops(database)
+        self.device_servicer = DeviceServiceServicerImpl(database, driver_instance_cache, self.monitoring_loops)
 
+    def install_servicers(self):
         self.monitoring_loops.start()
-
-        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
-        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))
-
-        self.device_servicer = DeviceServiceServicerImpl(
-            self.context_client, self.database, self.driver_instance_cache, self.monitoring_loops)
         add_DeviceServiceServicer_to_server(self.device_servicer, self.server)
 
-        self.health_servicer = HealthServicer(
-            experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
-        add_HealthServicer_to_server(self.health_servicer, self.server)
-
-        port = self.server.add_insecure_port(self.endpoint)
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(port))
-        LOGGER.info('Listening on {:s}...'.format(str(self.endpoint)))
-        self.server.start()
-        self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) # pylint: disable=maybe-no-member
-
-        LOGGER.debug('Service started')
-
     def stop(self):
-        LOGGER.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period)))
-        self.health_servicer.enter_graceful_shutdown()
-        self.server.stop(self.grace_period)
+        super().stop()
         self.monitoring_loops.stop()
-        LOGGER.debug('Service stopped')
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
index 8e00b344f0462ae56b289cfc5d33e6c1b1c42b7e..e328c76cdd29147163adc9713c86fc0efc31555d 100644
--- a/src/device/service/DeviceServiceServicerImpl.py
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -49,11 +49,10 @@ METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
 
 class DeviceServiceServicerImpl(DeviceServiceServicer):
     def __init__(
-        self, context_client : ContextClient, database : Database, driver_instance_cache : DriverInstanceCache,
-        monitoring_loops : MonitoringLoops):
-
+        self, database : Database, driver_instance_cache : DriverInstanceCache, monitoring_loops : MonitoringLoops
+    ) -> None:
         LOGGER.debug('Creating Servicer...')
-        self.context_client = context_client
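+        # the Context client is now created internally and self-configures from environment settings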
+        self.context_client = ContextClient()
         self.database = database
         self.driver_instance_cache = driver_instance_cache
         self.monitoring_loops = monitoring_loops
diff --git a/src/device/service/MonitoringLoops.py b/src/device/service/MonitoringLoops.py
index e5b671f7f06beade5ab9f8b6539527999d49b9e8..eff634c75537ed26fd77f02f6adb85f0b0555aa8 100644
--- a/src/device/service/MonitoringLoops.py
+++ b/src/device/service/MonitoringLoops.py
@@ -18,7 +18,7 @@ from typing import Dict
 from common.orm.Database import Database
 from common.orm.HighLevel import get_object
 from common.orm.backend.Tools import key_to_str
-from monitoring.client.monitoring_client import MonitoringClient
+from monitoring.client.MonitoringClient import MonitoringClient
 from monitoring.proto.monitoring_pb2 import Kpi
 from .database.KpiModel import KpiModel
 from .database.RelationModels import EndPointMonitorKpiModel
@@ -55,8 +55,8 @@ class MonitoringLoop:
         self._collector_thread.join()
 
 class MonitoringLoops:
-    def __init__(self, monitoring_client : MonitoringClient, database : Database) -> None:
-        self._monitoring_client = monitoring_client
+    def __init__(self, database : Database) -> None:
+        self._monitoring_client = MonitoringClient()
         self._database = database
         self._samples_queue = queue.Queue()
         self._running = threading.Event()
@@ -82,7 +82,6 @@ class MonitoringLoops:
 
     def start(self):
         self._exporter_thread.start()
-        self._running.set()
 
     @property
     def is_running(self): return self._running.is_set()
@@ -96,6 +95,7 @@ class MonitoringLoops:
             LOGGER.error('[MonitoringLoops:_export] Database not set. Terminating Exporter.')
             return
 
+        self._running.set()
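+        # set from inside the exporter thread so is_running reflects the actual loop state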
         while not self._terminate.is_set():
             try:
                 sample = self._samples_queue.get(block=True, timeout=QUEUE_GET_WAIT_TIMEOUT)
@@ -149,3 +149,5 @@ class MonitoringLoops:
                 }))
             except: # pylint: disable=bare-except
                 LOGGER.exception('Unable to format/send Kpi')
+
+        self._running.clear()
diff --git a/src/device/service/__main__.py b/src/device/service/__main__.py
index 0e92cabba4ddae84a9d4cd938c7e8b31ab4f0531..1f0adfa8f1dd8b3e307ed202967b1d5195171f11 100644
--- a/src/device/service/__main__.py
+++ b/src/device/service/__main__.py
@@ -14,12 +14,10 @@
 
 import logging, signal, sys, threading
 from prometheus_client import start_http_server
-from common.Settings import get_setting, wait_for_environment_variables
-from context.client.ContextClient import ContextClient
-from device.Config import (
-    CONTEXT_SERVICE_HOST, CONTEXT_SERVICE_PORT, GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD, LOG_LEVEL,
-    METRICS_PORT, MONITORING_SERVICE_HOST, MONITORING_SERVICE_PORT)
-from monitoring.client.monitoring_client import MonitoringClient
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+    wait_for_environment_variables)
 from .DeviceService import DeviceService
 from .driver_api.DriverFactory import DriverFactory
 from .driver_api.DriverInstanceCache import DriverInstanceCache
@@ -35,12 +33,7 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
 def main():
     global LOGGER # pylint: disable=global-statement
 
-    grpc_service_port       = get_setting('DEVICESERVICE_SERVICE_PORT_GRPC',     default=GRPC_SERVICE_PORT      )
-    max_workers             = get_setting('MAX_WORKERS',                         default=GRPC_MAX_WORKERS       )
-    grace_period            = get_setting('GRACE_PERIOD',                        default=GRPC_GRACE_PERIOD      )
-    log_level               = get_setting('LOG_LEVEL',                           default=LOG_LEVEL              )
-    metrics_port            = get_setting('METRICS_PORT',                        default=METRICS_PORT           )
-
+    log_level = get_log_level()
     logging.basicConfig(level=log_level)
     logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
     logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
@@ -48,43 +41,25 @@ def main():
     LOGGER = logging.getLogger(__name__)
 
     wait_for_environment_variables([
-        'CONTEXTSERVICE_SERVICE_HOST', 'CONTEXTSERVICE_SERVICE_PORT_GRPC',
-        'MONITORINGSERVICE_SERVICE_HOST', 'MONITORINGSERVICE_SERVICE_PORT_GRPC'
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
-    context_service_host    = get_setting('CONTEXTSERVICE_SERVICE_HOST',         default=CONTEXT_SERVICE_HOST   )
-    context_service_port    = get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC',    default=CONTEXT_SERVICE_PORT   )
-    monitoring_service_host = get_setting('MONITORINGSERVICE_SERVICE_HOST',      default=MONITORING_SERVICE_HOST)
-    monitoring_service_port = get_setting('MONITORINGSERVICE_SERVICE_PORT_GRPC', default=MONITORING_SERVICE_PORT)
-
     signal.signal(signal.SIGINT,  signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
     LOGGER.info('Starting...')
 
     # Start metrics server
+    metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
-    # Initialize Context Client
-    if context_service_host is None or context_service_port is None:
-        raise Exception('Wrong address({:s}):port({:s}) of Context component'.format(
-            str(context_service_host), str(context_service_port)))
-    context_client = ContextClient(context_service_host, context_service_port)
-
-    # Initialize Monitoring Client
-    if monitoring_service_host is None or monitoring_service_port is None:
-        raise Exception('Wrong address({:s}):port({:s}) of Monitoring component'.format(
-            str(monitoring_service_host), str(monitoring_service_port)))
-    monitoring_client = MonitoringClient(monitoring_service_host, monitoring_service_port)
-
     # Initialize Driver framework
     driver_factory = DriverFactory(DRIVERS)
     driver_instance_cache = DriverInstanceCache(driver_factory)
 
     # Starting device service
-    grpc_service = DeviceService(
-        context_client, monitoring_client, driver_instance_cache, port=grpc_service_port, max_workers=max_workers,
-        grace_period=grace_period)
+    grpc_service = DeviceService(driver_instance_cache)
     grpc_service.start()
 
     # Wait for Ctrl+C or termination signal
diff --git a/src/device/service/database/EndPointModel.py b/src/device/service/database/EndPointModel.py
index 286a51db69738782a6d1acaed5b1d7846ac67b2b..84d0c97073481af162b1e66f7e35c93bc6e1eed5 100644
--- a/src/device/service/database/EndPointModel.py
+++ b/src/device/service/database/EndPointModel.py
@@ -15,6 +15,7 @@
 import logging
 from typing import Dict, List
 from common.orm.Database import Database
+from common.orm.HighLevel import update_or_create_object
 from common.orm.backend.Tools import key_to_str
 from common.orm.fields.EnumeratedField import EnumeratedField
 from common.orm.fields.ForeignKeyField import ForeignKeyField
@@ -72,9 +73,14 @@ def set_endpoint_monitors(database : Database, db_endpoint : EndPointModel, grpc
     db_endpoint_pk = db_endpoint.pk
     for kpi_sample_type in grpc_endpoint_kpi_sample_types:
         orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type)
-        str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name])
-        db_endpoint_kpi_sample_type = EndPointMonitorModel(database, str_endpoint_kpi_sample_type_key)
-        db_endpoint_kpi_sample_type.endpoint_fk = db_endpoint
-        db_endpoint_kpi_sample_type.resource_key = '' # during initialization, allow empty value
-        db_endpoint_kpi_sample_type.kpi_sample_type = orm_kpi_sample_type
-        db_endpoint_kpi_sample_type.save()
+        str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, str(orm_kpi_sample_type.value)])
+        # update_or_create_object() replaces the previous create-and-save sequence and makes
+        # re-registration of endpoint monitors idempotent
+        update_or_create_object(database, EndPointMonitorModel, str_endpoint_kpi_sample_type_key, {
+            'endpoint_fk'    : db_endpoint,
+            #'resource_key'   : '', # during initialization, allow empty value
+            'kpi_sample_type': orm_kpi_sample_type,
+        })
diff --git a/src/device/service/driver_api/DriverFactory.py b/src/device/service/driver_api/DriverFactory.py
index 1e79b4ba45593d3f24f7193648010071d766ec58..b2b6c467a9d7c941a430e7bc7aaa1ab123053750 100644
--- a/src/device/service/driver_api/DriverFactory.py
+++ b/src/device/service/driver_api/DriverFactory.py
@@ -76,6 +76,7 @@ class DriverFactory:
                 field_candidate_driver_classes = field_candidate_driver_classes.union(field_indice_drivers)
 
             if candidate_driver_classes is None:
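+                # no driver matched this filter field; skip it so an empty set does not seed the candidates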
+                if len(field_candidate_driver_classes) == 0: continue
                 candidate_driver_classes = {k:1 for k in field_candidate_driver_classes}
             else:
                 for candidate_driver_class in candidate_driver_classes:
diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py
index 664b52821224f4d53b17cb5e10e44461ac7b75f4..40912f50b98f1d5fc9555d87a4855a12ab8e0c07 100644
--- a/src/device/service/drivers/__init__.py
+++ b/src/device/service/drivers/__init__.py
@@ -23,6 +23,11 @@ from .microwave.IETFApiDriver import IETFApiDriver
 DRIVERS = [
     (EmulatedDriver, [
         {
+            # Driver==UNDEFINED, regardless of device type => use EmulatedDriver
+            FilterFieldEnum.DRIVER: ORM_DeviceDriverEnum.UNDEFINED,
+        },
+        {
+            # Emulated OLS/Packet Router, specifying Undefined/OpenConfig/TAPI Driver => use EmulatedDriver
             FilterFieldEnum.DEVICE_TYPE: [
                 DeviceTypeEnum.EMULATED_OPTICAL_LINE_SYSTEM,
                 DeviceTypeEnum.EMULATED_PACKET_ROUTER,
@@ -36,18 +41,21 @@ DRIVERS = [
     ]),
     (OpenConfigDriver, [
         {
+            # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver
             FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER,
             FilterFieldEnum.DRIVER     : ORM_DeviceDriverEnum.OPENCONFIG,
         }
     ]),
     (TransportApiDriver, [
         {
+            # Real OLS, specifying TAPI Driver => use TransportApiDriver
             FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPTICAL_LINE_SYSTEM,
             FilterFieldEnum.DRIVER     : ORM_DeviceDriverEnum.TRANSPORT_API,
         }
     ]),
     (P4Driver, [
         {
+            # Real P4 Switch, specifying P4 Driver => use P4Driver
             FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH,
             FilterFieldEnum.DRIVER     : ORM_DeviceDriverEnum.P4,
         }
diff --git a/src/device/tests/MockMonitoringService.py b/src/device/tests/MockMonitoringService.py
deleted file mode 100644
index 3e8550058daa905517f26a659a08c66db1172d74..0000000000000000000000000000000000000000
--- a/src/device/tests/MockMonitoringService.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import grpc, logging
-from concurrent import futures
-from queue import Queue
-from monitoring.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
-from monitoring.proto.monitoring_pb2_grpc import  add_MonitoringServiceServicer_to_server
-from .MockMonitoringServiceServicerImpl import MockMonitoringServiceServicerImpl
-
-BIND_ADDRESS = '0.0.0.0'
-LOGGER = logging.getLogger(__name__)
-
-class MockMonitoringService:
-    def __init__(
-        self, address=BIND_ADDRESS, port=GRPC_SERVICE_PORT, max_workers=GRPC_MAX_WORKERS,
-        grace_period=GRPC_GRACE_PERIOD):
-
-        self.queue_samples = Queue()
-        self.address = address
-        self.port = port
-        self.endpoint = None
-        self.max_workers = max_workers
-        self.grace_period = grace_period
-        self.monitoring_servicer = None
-        self.pool = None
-        self.server = None
-
-    def start(self):
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(self.port))
-        LOGGER.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
-            str(self.endpoint), str(self.max_workers)))
-
-        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
-        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))
-
-        self.monitoring_servicer = MockMonitoringServiceServicerImpl(self.queue_samples)
-        add_MonitoringServiceServicer_to_server(self.monitoring_servicer, self.server)
-
-        port = self.server.add_insecure_port(self.endpoint)
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(port))
-        LOGGER.info('Listening on {:s}...'.format(str(self.endpoint)))
-        self.server.start()
-
-        LOGGER.debug('Service started')
-
-    def stop(self):
-        LOGGER.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period)))
-        self.server.stop(self.grace_period)
-        LOGGER.debug('Service stopped')
diff --git a/src/device/tests/MockService_Dependencies.py b/src/device/tests/MockService_Dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b2a7788fa3fa242edb9cc7c4b10e22244d7c99a
--- /dev/null
+++ b/src/device/tests/MockService_Dependencies.py
@@ -0,0 +1,50 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, queue
+from typing import Union
+from common.Constants import ServiceNameEnum
+from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name
+from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.tests.MockServicerImpl_Monitoring import MockServicerImpl_Monitoring
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from context.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+from monitoring.proto.monitoring_pb2_grpc import add_MonitoringServiceServicer_to_server
+
+LOCAL_HOST = '127.0.0.1'
+
+SERVICE_CONTEXT = ServiceNameEnum.CONTEXT
+SERVICE_MONITORING = ServiceNameEnum.MONITORING
+
+class MockService_Dependencies(GenericGrpcService):
+    # Mock Service implementing Context and Monitoring to simplify unitary tests of Device
+
+    def __init__(self, bind_port: Union[str, int]) -> None:
+        super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')
+
+    # pylint: disable=attribute-defined-outside-init
+    def install_servicers(self):
+        self.context_servicer = MockServicerImpl_Context()
+        add_ContextServiceServicer_to_server(self.context_servicer, self.server)
+
+        self.queue_samples = queue.Queue()
+        self.monitoring_servicer = MockServicerImpl_Monitoring(queue_samples=self.queue_samples)
+        add_MonitoringServiceServicer_to_server(self.monitoring_servicer, self.server)
+
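+    # Export CONTEXTSERVICE_/MONITORINGSERVICE_ SERVICE_HOST and _PORT_GRPC so that Device's
+    # clients resolve their endpoints to this mock.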
+    def configure_env_vars(self):
+        os.environ[get_env_var_name(SERVICE_CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
+        os.environ[get_env_var_name(SERVICE_CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
+
+        os.environ[get_env_var_name(SERVICE_MONITORING, ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
+        os.environ[get_env_var_name(SERVICE_MONITORING, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
diff --git a/src/device/tests/PrepareTestScenario.py b/src/device/tests/PrepareTestScenario.py
new file mode 100644
index 0000000000000000000000000000000000000000..08991221a3f5121c587ecfd4644a6b28156ccefd
--- /dev/null
+++ b/src/device/tests/PrepareTestScenario.py
@@ -0,0 +1,80 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, pytest
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
+from context.client.ContextClient import ContextClient
+from context.proto.context_pb2 import Context, Topology
+from device.client.DeviceClient import DeviceClient
+from device.service.DeviceService import DeviceService
+from device.service.driver_api.DriverFactory import DriverFactory
+from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
+from device.service.drivers import DRIVERS
+from device.tests.CommonObjects import CONTEXT, TOPOLOGY
+from device.tests.MockService_Dependencies import MockService_Dependencies
+from monitoring.client.MonitoringClient import MonitoringClient
+
+LOCAL_HOST = '127.0.0.1'
+MOCKSERVICE_PORT = 10000
+DEVICE_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.DEVICE) # avoid privileged ports
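+# Advertise the Device service host/port through environment variables; the DeviceClient
+# fixture below is created without explicit address arguments and resolves them from here.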
+os.environ[get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(DEVICE_SERVICE_PORT)
+
+@pytest.fixture(scope='session')
+def mock_service():
+    _service = MockService_Dependencies(MOCKSERVICE_PORT)
+    _service.configure_env_vars()
+    _service.start()
+    yield _service
+    _service.stop()
+
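+# The client fixtures depend on mock_service purely for ordering: the mock (and the
+# environment variables it configures) must be in place before the clients are created.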
+@pytest.fixture(scope='session')
+def context_client(mock_service : MockService_Dependencies): # pylint: disable=redefined-outer-name
+    _client = ContextClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def monitoring_client(mock_service : MockService_Dependencies): # pylint: disable=redefined-outer-name
+    _client = MonitoringClient()
+    yield _client
+    _client.close()
+
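+# device_service starts a real DeviceService wired to the production driver framework;
+# its gRPC calls towards Context and Monitoring are served by the mock started above.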
+@pytest.fixture(scope='session')
+def device_service(
+    context_client : ContextClient,         # pylint: disable=redefined-outer-name
+    monitoring_client : MonitoringClient):  # pylint: disable=redefined-outer-name
+
+    _driver_factory = DriverFactory(DRIVERS)
+    _driver_instance_cache = DriverInstanceCache(_driver_factory)
+    _service = DeviceService(_driver_instance_cache)
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name
+    _client = DeviceClient()
+    yield _client
+    _client.close()
+
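+# Seeding the default Context and Topology is expressed as a test so that the per-driver
+# test modules can import it and run it as their first step.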
+def test_prepare_environment(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    context_client.SetContext(Context(**CONTEXT))
+    context_client.SetTopology(Topology(**TOPOLOGY))
diff --git a/src/device/tests/test_unitary.py b/src/device/tests/test_unitary.py
deleted file mode 100644
index 0853da9a5e3572b15e5581413d1a5c765e02444e..0000000000000000000000000000000000000000
--- a/src/device/tests/test_unitary.py
+++ /dev/null
@@ -1,1010 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import calendar, copy, dateutil.parser, grpc, json, logging, operator, os, pytest, queue, time
-from datetime import datetime, timezone
-from typing import Tuple
-from common.orm.Database import Database
-from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum
-from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
-from common.message_broker.MessageBroker import MessageBroker
-from common.tools.grpc.Tools import grpc_message_to_json_string
-from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
-from context.Config import (
-    GRPC_SERVICE_PORT as CONTEXT_GRPC_SERVICE_PORT, GRPC_MAX_WORKERS as CONTEXT_GRPC_MAX_WORKERS,
-    GRPC_GRACE_PERIOD as CONTEXT_GRPC_GRACE_PERIOD)
-from context.client.ContextClient import ContextClient
-from context.proto.context_pb2 import DeviceId, DeviceOperationalStatusEnum
-from context.service.grpc_server.ContextService import ContextService
-from device.Config import (
-    GRPC_SERVICE_PORT as DEVICE_GRPC_SERVICE_PORT, GRPC_MAX_WORKERS as DEVICE_GRPC_MAX_WORKERS,
-    GRPC_GRACE_PERIOD as DEVICE_GRPC_GRACE_PERIOD)
-from device.client.DeviceClient import DeviceClient
-from device.proto.context_pb2 import ConfigActionEnum, Context, Device, Topology
-from device.proto.device_pb2 import MonitoringSettings
-from device.proto.kpi_sample_types_pb2 import KpiSampleType
-from device.service.DeviceService import DeviceService
-from device.service.driver_api._Driver import _Driver
-from device.service.driver_api.DriverFactory import DriverFactory
-from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
-from device.service.drivers import DRIVERS
-from device.tests.MockMonitoringService import MockMonitoringService
-from monitoring.Config import (
-    GRPC_SERVICE_PORT as MONITORING_GRPC_SERVICE_PORT, GRPC_MAX_WORKERS as MONITORING_GRPC_MAX_WORKERS,
-    GRPC_GRACE_PERIOD as MONITORING_GRPC_GRACE_PERIOD)
-from monitoring.client.monitoring_client import MonitoringClient
-from .CommonObjects import CONTEXT, TOPOLOGY
-
-from .Device_Emulated import (
-    DEVICE_EMU, DEVICE_EMU_CONFIG_ADDRESSES, DEVICE_EMU_CONFIG_ENDPOINTS, DEVICE_EMU_CONNECT_RULES,
-    DEVICE_EMU_DECONFIG_ADDRESSES, DEVICE_EMU_DECONFIG_ENDPOINTS, DEVICE_EMU_EP_DESCS, DEVICE_EMU_ENDPOINTS_COOKED,
-    DEVICE_EMU_ID, DEVICE_EMU_RECONFIG_ADDRESSES, DEVICE_EMU_UUID)
-ENABLE_EMULATED = True
-
-try:
-    from .Device_OpenConfig_Infinera1 import(
-    #from .Device_OpenConfig_Infinera2 import(
-    #from .Device_OpenConfig_Cisco import(
-    #from .Device_OpenConfig_Adva import(
-
-        DEVICE_OC, DEVICE_OC_CONFIG_RULES, DEVICE_OC_DECONFIG_RULES, DEVICE_OC_CONNECT_RULES, DEVICE_OC_ID,
-        DEVICE_OC_UUID)
-    ENABLE_OPENCONFIG = True
-except ImportError:
-    ENABLE_OPENCONFIG = False
-
-try:
-    from .Device_Transport_Api_CTTC import (
-        DEVICE_TAPI, DEVICE_TAPI_CONNECT_RULES, DEVICE_TAPI_UUID, DEVICE_TAPI_ID, DEVICE_TAPI_CONFIG_RULES,
-        DEVICE_TAPI_DECONFIG_RULES)
-    ENABLE_TAPI = True
-except ImportError:
-    ENABLE_TAPI = False
-
-from .mock_p4runtime_service import MockP4RuntimeService
-try:
-    from .device_p4 import(
-        DEVICE_P4, DEVICE_P4_ID, DEVICE_P4_UUID, DEVICE_P4_NAME, DEVICE_P4_ADDRESS, DEVICE_P4_PORT, DEVICE_P4_WORKERS,
-        DEVICE_P4_GRACE_PERIOD, DEVICE_P4_CONNECT_RULES, DEVICE_P4_CONFIG_RULES)
-    ENABLE_P4 = True
-except ImportError:
-    ENABLE_P4 = False
-
-#ENABLE_EMULATED   = False # set to False to disable tests of Emulated devices
-#ENABLE_OPENCONFIG = False # set to False to disable tests of OpenConfig devices
-#ENABLE_TAPI       = False # set to False to disable tests of TAPI devices
-#ENABLE_P4         = False # set to False to disable tests of P4 devices
-
-ENABLE_OPENCONFIG_CONFIGURE   = True
-ENABLE_OPENCONFIG_MONITOR     = True
-ENABLE_OPENCONFIG_DECONFIGURE = True
-
-
-logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
-logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
-logging.getLogger('monitoring-client').setLevel(logging.WARNING)
-
-LOGGER = logging.getLogger(__name__)
-LOGGER.setLevel(logging.DEBUG)
-
-CONTEXT_GRPC_SERVICE_PORT = 10000 + CONTEXT_GRPC_SERVICE_PORT # avoid privileged ports
-DEVICE_GRPC_SERVICE_PORT = 10000 + DEVICE_GRPC_SERVICE_PORT # avoid privileged ports
-MONITORING_GRPC_SERVICE_PORT = 10000 + MONITORING_GRPC_SERVICE_PORT # avoid privileged ports
-
-DEFAULT_REDIS_SERVICE_HOST = '127.0.0.1'
-DEFAULT_REDIS_SERVICE_PORT = 6379
-DEFAULT_REDIS_DATABASE_ID  = 0
-
-REDIS_CONFIG = {
-    'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST),
-    'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT),
-    'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID',  DEFAULT_REDIS_DATABASE_ID ),
-}
-
-SCENARIOS = [
-    ('all_inmemory', DatabaseBackendEnum.INMEMORY, {},           MessageBrokerBackendEnum.INMEMORY, {}          ),
-    #('all_redis',    DatabaseBackendEnum.REDIS,    REDIS_CONFIG, MessageBrokerBackendEnum.REDIS,    REDIS_CONFIG),
-]
-
-@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS)
-def context_db_mb(request) -> Tuple[Database, MessageBroker]:
-    name,db_backend,db_settings,mb_backend,mb_settings = request.param
-    msg = 'Running scenario {:s} db_backend={:s}, db_settings={:s}, mb_backend={:s}, mb_settings={:s}...'
-    LOGGER.info(msg.format(str(name), str(db_backend.value), str(db_settings), str(mb_backend.value), str(mb_settings)))
-    _database = Database(get_database_backend(backend=db_backend, **db_settings))
-    _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings))
-    yield _database, _message_broker
-    _message_broker.terminate()
-
-@pytest.fixture(scope='session')
-def context_service(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name
-    _service = ContextService(
-        context_db_mb[0], context_db_mb[1], port=CONTEXT_GRPC_SERVICE_PORT, max_workers=CONTEXT_GRPC_MAX_WORKERS,
-        grace_period=CONTEXT_GRPC_GRACE_PERIOD)
-    _service.start()
-    yield _service
-    _service.stop()
-
-@pytest.fixture(scope='session')
-def context_client(context_service : ContextService): # pylint: disable=redefined-outer-name
-    _client = ContextClient(address='127.0.0.1', port=CONTEXT_GRPC_SERVICE_PORT)
-    yield _client
-    _client.close()
-
-@pytest.fixture(scope='session')
-def monitoring_service():
-    _service = MockMonitoringService(port=MONITORING_GRPC_SERVICE_PORT, max_workers=MONITORING_GRPC_MAX_WORKERS,
-        grace_period=MONITORING_GRPC_GRACE_PERIOD)
-    _service.start()
-    yield _service
-    _service.stop()
-
-@pytest.fixture(scope='session')
-def monitoring_client(monitoring_service : MockMonitoringService): # pylint: disable=redefined-outer-name
-    _client = MonitoringClient(server='127.0.0.1', port=MONITORING_GRPC_SERVICE_PORT)
-    #yield _client
-    #_client.close()
-    return _client
-
-@pytest.fixture(scope='session')
-def device_service(
-    context_client : ContextClient,         # pylint: disable=redefined-outer-name
-    monitoring_client : MonitoringClient):  # pylint: disable=redefined-outer-name
-
-    _driver_factory = DriverFactory(DRIVERS)
-    _driver_instance_cache = DriverInstanceCache(_driver_factory)
-    _service = DeviceService(
-        context_client, monitoring_client, _driver_instance_cache, port=DEVICE_GRPC_SERVICE_PORT,
-        max_workers=DEVICE_GRPC_MAX_WORKERS, grace_period=DEVICE_GRPC_GRACE_PERIOD)
-    _service.start()
-    yield _service
-    _service.stop()
-
-@pytest.fixture(scope='session')
-def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name
-    _client = DeviceClient(address='127.0.0.1', port=DEVICE_GRPC_SERVICE_PORT)
-    yield _client
-    _client.close()
-
-@pytest.fixture(scope='session')
-def p4runtime_service():
-    _service = MockP4RuntimeService(
-        address=DEVICE_P4_ADDRESS, port=DEVICE_P4_PORT,
-        max_workers=DEVICE_P4_WORKERS,
-        grace_period=DEVICE_P4_GRACE_PERIOD)
-    _service.start()
-    yield _service
-    _service.stop()
-
-
-def test_prepare_environment(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    context_client.SetContext(Context(**CONTEXT))
-    context_client.SetTopology(Topology(**TOPOLOGY))
-
-
-# ----- Test Device Driver Emulated --------------------------------------------
-# Device Driver Emulated tests are used to validate the Driver API as well as the Emulated Device Driver. Note that
-# other Drivers might support a different set of resource paths and attributes/values per resource; however, they
-# must implement the Driver API.
-
-def test_device_emulated_add_error_cases(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_EMULATED: pytest.skip('Skipping test: No Emulated device has been configured')
-
-    with pytest.raises(grpc.RpcError) as e:
-        DEVICE_EMU_WITH_ENDPOINTS = copy.deepcopy(DEVICE_EMU)
-        DEVICE_EMU_WITH_ENDPOINTS['device_endpoints'].append(json_endpoint(DEVICE_EMU_ID, 'ep-id', 'ep-type'))
-        device_client.AddDevice(Device(**DEVICE_EMU_WITH_ENDPOINTS))
-    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
-    msg_head = 'device.device_endpoints(['
-    msg_tail = ']) is invalid; RPC method AddDevice does not accept Endpoints. '\
-               'Endpoints are discovered through interrogation of the physical device.'
-    except_msg = str(e.value.details())
-    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
-
-    with pytest.raises(grpc.RpcError) as e:
-        DEVICE_EMU_WITH_EXTRA_RULES = copy.deepcopy(DEVICE_EMU)
-        DEVICE_EMU_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_EMU_CONNECT_RULES)
-        DEVICE_EMU_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_EMU_CONFIG_ENDPOINTS)
-        device_client.AddDevice(Device(**DEVICE_EMU_WITH_EXTRA_RULES))
-    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
-    msg_head = 'device.device_config.config_rules(['
-    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\
-               'with "_connect/" tag. Others should be configured after adding the device.'
-    except_msg = str(e.value.details())
-    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
-
-
-def test_device_emulated_add_correct(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_EMULATED: pytest.skip('Skipping test: No Emulated device has been configured')
-
-    DEVICE_EMU_WITH_CONNECT_RULES = copy.deepcopy(DEVICE_EMU)
-    DEVICE_EMU_WITH_CONNECT_RULES['device_config']['config_rules'].extend(DEVICE_EMU_CONNECT_RULES)
-    device_client.AddDevice(Device(**DEVICE_EMU_WITH_CONNECT_RULES))
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_EMU_UUID) # we know the driver exists now
-    assert driver is not None
-
-
-def test_device_emulated_get(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_EMULATED: pytest.skip('Skipping test: No Emulated device has been configured')
-
-    initial_config = device_client.GetInitialConfig(DeviceId(**DEVICE_EMU_ID))
-    LOGGER.info('initial_config = {:s}'.format(grpc_message_to_json_string(initial_config)))
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_EMU_ID))
-    LOGGER.info('device_data = {:s}'.format(grpc_message_to_json_string(device_data)))
-
-
-def test_device_emulated_configure(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_EMULATED: pytest.skip('Skipping test: No Emulated device has been configured')
-
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_EMU_UUID) # we know the driver exists now
-    assert driver is not None
-
-    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-    assert len(driver_config) == len(DEVICE_EMU_ENDPOINTS_COOKED)
-    for endpoint_cooked in DEVICE_EMU_ENDPOINTS_COOKED:
-        assert endpoint_cooked in driver_config
-
-    DEVICE_EMU_WITH_CONFIG_RULES = copy.deepcopy(DEVICE_EMU)
-    DEVICE_EMU_WITH_CONFIG_RULES['device_config']['config_rules'].extend(DEVICE_EMU_CONFIG_ENDPOINTS)
-    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_CONFIG_RULES))
-
-    DEVICE_EMU_WITH_CONFIG_RULES = copy.deepcopy(DEVICE_EMU)
-    DEVICE_EMU_WITH_CONFIG_RULES['device_config']['config_rules'].extend(DEVICE_EMU_CONFIG_ADDRESSES)
-    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_CONFIG_RULES))
-
-    DEVICE_EMU_WITH_OPERATIONAL_STATUS = copy.deepcopy(DEVICE_EMU)
-    DEVICE_EMU_WITH_OPERATIONAL_STATUS['device_operational_status'] = \
-        DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
-    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_OPERATIONAL_STATUS))
-
-    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-    assert len(driver_config) == len(DEVICE_EMU_ENDPOINTS_COOKED) + len(DEVICE_EMU_CONFIG_ADDRESSES)
-    for endpoint_cooked in DEVICE_EMU_ENDPOINTS_COOKED:
-        endpoint_cooked = copy.deepcopy(endpoint_cooked)
-        endpoint_cooked[1]['enabled'] = True
-        assert endpoint_cooked in driver_config
-    for config_rule in DEVICE_EMU_CONFIG_ADDRESSES:
-        assert (config_rule['resource_key'], json.loads(config_rule['resource_value'])) in driver_config
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_EMU_ID))
-    assert device_data.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
-
-    config_rules = [
-        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
-        for config_rule in device_data.device_config.config_rules
-    ]
-    #LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
-    #    '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
-    RESULTING_CONFIG_ENDPOINTS = {cr['resource_key']:cr for cr in copy.deepcopy(DEVICE_EMU_CONFIG_ENDPOINTS)}
-    for endpoint_cooked in DEVICE_EMU_ENDPOINTS_COOKED:
-        values = json.loads(RESULTING_CONFIG_ENDPOINTS[endpoint_cooked[0]]['resource_value'])
-        values.update(endpoint_cooked[1])
-        RESULTING_CONFIG_ENDPOINTS[endpoint_cooked[0]]['resource_value'] = json.dumps(values, sort_keys=True)
-    for config_rule in RESULTING_CONFIG_ENDPOINTS.values():
-        config_rule = (
-            ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'],
-            json.loads(json.dumps(config_rule['resource_value'])))
-        assert config_rule in config_rules
-    for config_rule in DEVICE_EMU_CONFIG_ADDRESSES:
-        config_rule = (
-            ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'],
-            json.loads(json.dumps(config_rule['resource_value'])))
-        assert config_rule in config_rules
-
-    # Try to reconfigure...
-
-    DEVICE_EMU_WITH_RECONFIG_RULES = copy.deepcopy(DEVICE_EMU)
-    DEVICE_EMU_WITH_RECONFIG_RULES['device_operational_status'] = \
-        DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
-    DEVICE_EMU_WITH_RECONFIG_RULES['device_config']['config_rules'].extend(DEVICE_EMU_RECONFIG_ADDRESSES)
-    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_RECONFIG_RULES))
-
-    RESULTING_CONFIG_RULES = {cr['resource_key']:cr for cr in copy.deepcopy(DEVICE_EMU_CONFIG_ENDPOINTS)}
-    for endpoint_cooked in DEVICE_EMU_ENDPOINTS_COOKED:
-        values = json.loads(RESULTING_CONFIG_RULES[endpoint_cooked[0]]['resource_value'])
-        values.update(endpoint_cooked[1])
-        RESULTING_CONFIG_RULES[endpoint_cooked[0]]['resource_value'] = json.dumps(values, sort_keys=True)
-    RESULTING_CONFIG_RULES.update({cr['resource_key']:cr for cr in copy.deepcopy(DEVICE_EMU_CONFIG_ADDRESSES)})
-    for reconfig_rule in DEVICE_EMU_RECONFIG_ADDRESSES:
-        if reconfig_rule['action'] == ConfigActionEnum.CONFIGACTION_DELETE:
-            RESULTING_CONFIG_RULES.pop(reconfig_rule['resource_key'], None)
-        else:
-            RESULTING_CONFIG_RULES[reconfig_rule['resource_key']] = reconfig_rule
-    RESULTING_CONFIG_RULES = RESULTING_CONFIG_RULES.values()
-    #LOGGER.info('RESULTING_CONFIG_RULES = {:s}'.format(str(RESULTING_CONFIG_RULES)))
-
-    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    driver_config = json.loads(json.dumps(driver_config)) # prevent integer keys from failing to match string keys
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-    assert len(driver_config) == len(RESULTING_CONFIG_RULES)
-    for config_rule in RESULTING_CONFIG_RULES:
-        resource = [config_rule['resource_key'], json.loads(config_rule['resource_value'])]
-        assert resource in driver_config
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_EMU_ID))
-    config_rules = [
-        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
-        for config_rule in device_data.device_config.config_rules
-    ]
-    #LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
-    #    '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
-    for config_rule in RESULTING_CONFIG_RULES:
-        config_rule = (
-            ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'], config_rule['resource_value'])
-        assert config_rule in config_rules
-
-
-def test_device_emulated_monitor(
-    context_client : ContextClient,                 # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,                   # pylint: disable=redefined-outer-name
-    device_service : DeviceService,                 # pylint: disable=redefined-outer-name
-    monitoring_service : MockMonitoringService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_EMULATED: pytest.skip('Skipping test: No Emulated device has been configured')
-
-    device_uuid = DEVICE_EMU_UUID
-    json_device_id = DEVICE_EMU_ID
-    device_id = DeviceId(**json_device_id)
-    device_data = context_client.GetDevice(device_id)
-    #LOGGER.info('device_data = \n{:s}'.format(str(device_data)))
-
-    driver : _Driver = device_service.driver_instance_cache.get(device_uuid) # we know the driver exists now
-    assert driver is not None
-    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-    #assert len(driver_config) == len(DEVICE_EMU_ENDPOINTS_COOKED) + len(DEVICE_EMU_CONFIG_ADDRESSES)
-
-    SAMPLING_DURATION_SEC = 10.0
-    SAMPLING_INTERVAL_SEC = 2.0
-
-    MONITORING_SETTINGS_LIST = []
-    KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED = {}
-    for endpoint in device_data.device_endpoints:
-        endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
-        for sample_type_id in endpoint.kpi_sample_types:
-            sample_type_name = str(KpiSampleType.Name(sample_type_id)).upper().replace('KPISAMPLETYPE_', '')
-            kpi_uuid = '{:s}-{:s}-{:s}-kpi_uuid'.format(device_uuid, endpoint_uuid, str(sample_type_id))
-            monitoring_settings = {
-                'kpi_id'        : {'kpi_id': {'uuid': kpi_uuid}},
-                'kpi_descriptor': {
-                    'kpi_description': 'Metric {:s} for Endpoint {:s} in Device {:s}'.format(
-                        sample_type_name, endpoint_uuid, device_uuid),
-                    'kpi_sample_type': sample_type_id,
-                    'device_id': json_device_id,
-                    'endpoint_id': json_endpoint_id(json_device_id, endpoint_uuid),
-                },
-                'sampling_duration_s': SAMPLING_DURATION_SEC,
-                'sampling_interval_s': SAMPLING_INTERVAL_SEC,
-            }
-            MONITORING_SETTINGS_LIST.append(monitoring_settings)
-            KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED[kpi_uuid] = 0
-
-    NUM_SAMPLES_EXPECTED_PER_KPI = SAMPLING_DURATION_SEC / SAMPLING_INTERVAL_SEC
-    NUM_SAMPLES_EXPECTED = len(MONITORING_SETTINGS_LIST) * NUM_SAMPLES_EXPECTED_PER_KPI
-
-    # Start monitoring the device
-    t_start_monitoring = datetime.timestamp(datetime.utcnow())
-    for monitoring_settings in MONITORING_SETTINGS_LIST:
-        device_client.MonitorDeviceKpi(MonitoringSettings(**monitoring_settings))
-
-    # wait to receive the expected number of samples
-    # if it takes more than 1.5 times the sampling duration, assume there is an error
-    time_ini = time.time()
-    queue_samples : queue.Queue = monitoring_service.queue_samples
-    received_samples = []
-    while (len(received_samples) < NUM_SAMPLES_EXPECTED) and (time.time() - time_ini < SAMPLING_DURATION_SEC * 1.5):
-        try:
-            received_sample = queue_samples.get(block=True, timeout=SAMPLING_INTERVAL_SEC / NUM_SAMPLES_EXPECTED)
-            #LOGGER.info('received_sample = {:s}'.format(str(received_sample)))
-            received_samples.append(received_sample)
-        except queue.Empty:
-            continue
-
-    t_end_monitoring = datetime.timestamp(datetime.utcnow())
-
-    #LOGGER.info('received_samples = {:s}'.format(str(received_samples)))
-    LOGGER.info('len(received_samples) = {:s}'.format(str(len(received_samples))))
-    LOGGER.info('NUM_SAMPLES_EXPECTED = {:s}'.format(str(NUM_SAMPLES_EXPECTED)))
-    assert len(received_samples) == NUM_SAMPLES_EXPECTED
-    for received_sample in received_samples:
-        kpi_uuid = received_sample.kpi_id.kpi_id.uuid
-        assert kpi_uuid in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED
-        assert isinstance(received_sample.timestamp, str)
-        try:
-            timestamp = float(received_sample.timestamp)
-        except ValueError:
-            dt_time = dateutil.parser.isoparse(received_sample.timestamp).replace(tzinfo=timezone.utc)
-            timestamp = float(calendar.timegm(dt_time.timetuple())) + (dt_time.microsecond / 1.e6)
-        assert timestamp > t_start_monitoring
-        assert timestamp < t_end_monitoring
-        assert received_sample.kpi_value.HasField('floatVal') or received_sample.kpi_value.HasField('intVal')
-        kpi_value = getattr(received_sample.kpi_value, received_sample.kpi_value.WhichOneof('value'))
-        assert isinstance(kpi_value, (float, int))
-        KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED[kpi_uuid] += 1
-
-    LOGGER.info('KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED = {:s}'.format(str(KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED)))
-    for kpi_uuid, num_samples_received in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED.items():
-        assert num_samples_received == NUM_SAMPLES_EXPECTED_PER_KPI
-
-    # Unsubscribe monitoring
-    for kpi_uuid in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED.keys():
-        MONITORING_SETTINGS_UNSUBSCRIBE = {
-            'kpi_id'             : {'kpi_id': {'uuid': kpi_uuid}},
-            'sampling_duration_s': -1, # a negative value in sampling_duration_s or sampling_interval_s means unsubscribe
-            'sampling_interval_s': -1, # kpi_id is mandatory to unsubscribe
-        }
-        device_client.MonitorDeviceKpi(MonitoringSettings(**MONITORING_SETTINGS_UNSUBSCRIBE))
-
-
-def test_device_emulated_deconfigure(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_EMULATED: pytest.skip('Skipping test: No Emulated device has been configured')
-
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_EMU_UUID) # we know the driver exists now
-    assert driver is not None
-
-    driver_config = driver.GetConfig()
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-
-    DEVICE_EMU_WITH_DECONFIG_RULES = copy.deepcopy(DEVICE_EMU)
-    DEVICE_EMU_WITH_DECONFIG_RULES['device_operational_status'] = \
-        DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
-    DEVICE_EMU_WITH_DECONFIG_RULES['device_config']['config_rules'].extend(DEVICE_EMU_DECONFIG_ADDRESSES)
-    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_DECONFIG_RULES))
-
-    RESULTING_CONFIG_RULES = {cr['resource_key']:cr for cr in copy.deepcopy(DEVICE_EMU_CONFIG_ENDPOINTS)}
-    for endpoint_cooked in DEVICE_EMU_ENDPOINTS_COOKED:
-        values = json.loads(RESULTING_CONFIG_RULES[endpoint_cooked[0]]['resource_value'])
-        values.update(endpoint_cooked[1])
-        RESULTING_CONFIG_RULES[endpoint_cooked[0]]['resource_value'] = json.dumps(values, sort_keys=True)
-    RESULTING_CONFIG_RULES = RESULTING_CONFIG_RULES.values()
-    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    driver_config = json.loads(json.dumps(driver_config)) # prevent integer keys from failing to match string keys
-    driver_config = list(filter(
-        lambda config_rule: (
-            not isinstance(config_rule[1], str) or not config_rule[1].startswith('do_sampling (trigger:')),
-        driver_config))
-    LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-    LOGGER.info('RESULTING_CONFIG_RULES = {:s}'.format(str(RESULTING_CONFIG_RULES)))
-    assert len(driver_config) == len(RESULTING_CONFIG_RULES)
-    for config_rule in RESULTING_CONFIG_RULES:
-        config_rule = [config_rule['resource_key'], json.loads(config_rule['resource_value'])]
-        #LOGGER.info('config_rule = {:s}'.format(str(config_rule)))
-        assert config_rule in driver_config
-
-    DEVICE_EMU_WITH_DECONFIG_RULES = copy.deepcopy(DEVICE_EMU)
-    DEVICE_EMU_WITH_DECONFIG_RULES['device_config']['config_rules'].extend(DEVICE_EMU_DECONFIG_ENDPOINTS)
-    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_DECONFIG_RULES))
-
-    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    driver_config = json.loads(json.dumps(driver_config)) # prevent integer keys from failing to match string keys
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-    assert len(driver_config) == 0
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_EMU_ID))
-    config_rules = device_data.device_config.config_rules
-    LOGGER.info('config_rules = {:s}'.format(str(config_rules)))
-    clean_config_rules = []
-    for config_rule in config_rules:
-        config_rule_value = json.loads(config_rule.resource_value)
-        if not isinstance(config_rule_value, str):
-            clean_config_rules.append(config_rule)
-            continue
-        if config_rule_value.startswith('do_sampling (trigger:'): continue
-        clean_config_rules.append(config_rule)
-    LOGGER.info('clean_config_rules = {:s}'.format(str(clean_config_rules)))
-    assert len(clean_config_rules) == 0
-
-
-def test_device_emulated_delete(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_EMULATED: pytest.skip('Skipping test: No Emulated device has been configured')
-
-    device_client.DeleteDevice(DeviceId(**DEVICE_EMU_ID))
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_EMU_UUID, {})
-    assert driver is None
-
-
-# ----- Test Device Driver OpenConfig ------------------------------------------
-
-def test_device_openconfig_add_error_cases(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
-
-    with pytest.raises(grpc.RpcError) as e:
-        DEVICE_OC_WITH_EXTRA_RULES = copy.deepcopy(DEVICE_OC)
-        DEVICE_OC_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_OC_CONNECT_RULES)
-        DEVICE_OC_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_OC_CONFIG_RULES)
-        device_client.AddDevice(Device(**DEVICE_OC_WITH_EXTRA_RULES))
-    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
-    msg_head = 'device.device_config.config_rules(['
-    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\
-               'with "_connect/" tag. Others should be configured after adding the device.'
-    except_msg = str(e.value.details())
-    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
-
-
-def test_device_openconfig_add_correct(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
-
-    DEVICE_OC_WITH_CONNECT_RULES = copy.deepcopy(DEVICE_OC)
-    DEVICE_OC_WITH_CONNECT_RULES['device_config']['config_rules'].extend(DEVICE_OC_CONNECT_RULES)
-    device_client.AddDevice(Device(**DEVICE_OC_WITH_CONNECT_RULES))
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_OC_UUID) # we know the driver exists now
-    assert driver is not None
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_OC_ID))
-    config_rules = [
-        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
-        for config_rule in device_data.device_config.config_rules
-    ]
-    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
-        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
-
-
-def test_device_openconfig_get(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
-
-    initial_config = device_client.GetInitialConfig(DeviceId(**DEVICE_OC_ID))
-    LOGGER.info('initial_config = {:s}'.format(grpc_message_to_json_string(initial_config)))
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_OC_ID))
-    LOGGER.info('device_data = {:s}'.format(grpc_message_to_json_string(device_data)))
-
-
-def test_device_openconfig_configure(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
-    if not ENABLE_OPENCONFIG_CONFIGURE: pytest.skip('Skipping test OpenConfig configure')
-
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_OC_UUID) # we know the driver exists now
-    assert driver is not None
-
-    # Requires to retrieve data from device; might be slow. Uncomment only when needed and test does not pass directly.
-    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-
-    DEVICE_OC_WITH_CONFIG_RULES = copy.deepcopy(DEVICE_OC)
-    DEVICE_OC_WITH_CONFIG_RULES['device_config']['config_rules'].extend(DEVICE_OC_CONFIG_RULES)
-    device_client.ConfigureDevice(Device(**DEVICE_OC_WITH_CONFIG_RULES))
-
-    # Requires to retrieve data from device; might be slow. Uncomment only when needed and test does not pass directly.
-    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_OC_ID))
-    config_rules = [
-        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
-        for config_rule in device_data.device_config.config_rules
-    ]
-    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
-        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
-    for config_rule in DEVICE_OC_CONFIG_RULES:
-        config_rule = (
-            ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'], config_rule['resource_value'])
-        assert config_rule in config_rules
-
-
-def test_device_openconfig_monitor(
-    context_client : ContextClient,                 # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,                   # pylint: disable=redefined-outer-name
-    device_service : DeviceService,                 # pylint: disable=redefined-outer-name
-    monitoring_service : MockMonitoringService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
-    if not ENABLE_OPENCONFIG_MONITOR: pytest.skip('Skipping test OpenConfig monitor')
-
-    device_uuid = DEVICE_OC_UUID
-    json_device_id = DEVICE_OC_ID
-    device_id = DeviceId(**json_device_id)
-    device_data = context_client.GetDevice(device_id)
-    #LOGGER.info('device_data = \n{:s}'.format(str(device_data)))
-
-    driver : _Driver = device_service.driver_instance_cache.get(device_uuid) # we know the driver exists now
-    assert driver is not None
-
-    SAMPLING_DURATION_SEC = 60.0
-    SAMPLING_INTERVAL_SEC = 15.0
-
-    MONITORING_SETTINGS_LIST = []
-    KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED = {}
-    for endpoint in device_data.device_endpoints:
-        endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
-        for sample_type_id in endpoint.kpi_sample_types:
-            sample_type_name = str(KpiSampleType.Name(sample_type_id)).upper().replace('KPISAMPLETYPE_', '')
-            kpi_uuid = '{:s}-{:s}-{:s}-kpi_uuid'.format(device_uuid, endpoint_uuid, str(sample_type_id))
-            monitoring_settings = {
-                'kpi_id'        : {'kpi_id': {'uuid': kpi_uuid}},
-                'kpi_descriptor': {
-                    'kpi_description': 'Metric {:s} for Endpoint {:s} in Device {:s}'.format(
-                        sample_type_name, endpoint_uuid, device_uuid),
-                    'kpi_sample_type': sample_type_id,
-                    'device_id': json_device_id,
-                    'endpoint_id': json_endpoint_id(json_device_id, endpoint_uuid),
-                },
-                'sampling_duration_s': SAMPLING_DURATION_SEC,
-                'sampling_interval_s': SAMPLING_INTERVAL_SEC,
-            }
-            MONITORING_SETTINGS_LIST.append(monitoring_settings)
-            KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED[kpi_uuid] = 0
-
-    NUM_SAMPLES_EXPECTED_PER_KPI = SAMPLING_DURATION_SEC / SAMPLING_INTERVAL_SEC
-    NUM_SAMPLES_EXPECTED = len(MONITORING_SETTINGS_LIST) * NUM_SAMPLES_EXPECTED_PER_KPI
-
-    # Start monitoring the device
-    t_start_monitoring = datetime.timestamp(datetime.utcnow())
-    for monitoring_settings in MONITORING_SETTINGS_LIST:
-        device_client.MonitorDeviceKpi(MonitoringSettings(**monitoring_settings))
-
-    # wait to receive the expected number of samples
-    # if it takes more than 1.5 times the sampling duration, assume there is an error
-    time_ini = time.time()
-    queue_samples : queue.Queue = monitoring_service.queue_samples
-    received_samples = []
-    while (len(received_samples) < NUM_SAMPLES_EXPECTED) and (time.time() - time_ini < SAMPLING_DURATION_SEC * 1.5):
-        try:
-            received_sample = queue_samples.get(block=True, timeout=SAMPLING_INTERVAL_SEC / NUM_SAMPLES_EXPECTED)
-            #LOGGER.info('received_sample = {:s}'.format(str(received_sample)))
-            received_samples.append(received_sample)
-        except queue.Empty:
-            continue
-
-    t_end_monitoring = datetime.timestamp(datetime.utcnow())
-
-    #LOGGER.info('received_samples = {:s}'.format(str(received_samples)))
-    LOGGER.info('len(received_samples) = {:s}'.format(str(len(received_samples))))
-    LOGGER.info('NUM_SAMPLES_EXPECTED = {:s}'.format(str(NUM_SAMPLES_EXPECTED)))
-    #assert len(received_samples) == NUM_SAMPLES_EXPECTED
-    for received_sample in received_samples:
-        kpi_uuid = received_sample.kpi_id.kpi_id.uuid
-        assert kpi_uuid in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED
-        assert isinstance(received_sample.timestamp, str)
-        try:
-            timestamp = float(received_sample.timestamp)
-        except ValueError:
-            dt_time = dateutil.parser.isoparse(received_sample.timestamp).replace(tzinfo=timezone.utc)
-            timestamp = float(calendar.timegm(dt_time.timetuple())) + (dt_time.microsecond / 1.e6)
-        assert timestamp > t_start_monitoring
-        assert timestamp < t_end_monitoring
-        assert received_sample.kpi_value.HasField('floatVal') or received_sample.kpi_value.HasField('intVal')
-        kpi_value = getattr(received_sample.kpi_value, received_sample.kpi_value.WhichOneof('value'))
-        assert isinstance(kpi_value, (float, int))
-        KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED[kpi_uuid] += 1
-
-    LOGGER.info('KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED = {:s}'.format(str(KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED)))
-    # TODO: review why num_samples_received per KPI != NUM_SAMPLES_EXPECTED_PER_KPI
-    #for kpi_uuid, num_samples_received in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED.items():
-    #    assert num_samples_received == NUM_SAMPLES_EXPECTED_PER_KPI
-
-    # Unsubscribe monitoring
-    for kpi_uuid in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED.keys():
-        MONITORING_SETTINGS_UNSUBSCRIBE = {
-            'kpi_id'             : {'kpi_id': {'uuid': kpi_uuid}},
-            'sampling_duration_s': -1, # a negative value in sampling_duration_s or sampling_interval_s means unsubscribe
-            'sampling_interval_s': -1, # kpi_id is mandatory to unsubscribe
-        }
-        device_client.MonitorDeviceKpi(MonitoringSettings(**MONITORING_SETTINGS_UNSUBSCRIBE))
-
-
-def test_device_openconfig_deconfigure(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
-    if not ENABLE_OPENCONFIG_DECONFIGURE: pytest.skip('Skipping test OpenConfig deconfigure')
-
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_OC_UUID) # we know the driver exists now
-    assert driver is not None
-
-    # Requires to retrieve data from device; might be slow. Uncomment only when needed and test does not pass directly.
-    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-
-    DEVICE_OC_WITH_DECONFIG_RULES = copy.deepcopy(DEVICE_OC)
-    DEVICE_OC_WITH_DECONFIG_RULES['device_config']['config_rules'].extend(DEVICE_OC_DECONFIG_RULES)
-    device_client.ConfigureDevice(Device(**DEVICE_OC_WITH_DECONFIG_RULES))
-
-    # Requires to retrieve data from device; might be slow. Uncomment only when needed and test does not pass directly.
-    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_OC_ID))
-    config_rules = [
-        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
-        for config_rule in device_data.device_config.config_rules
-    ]
-    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
-        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
-    for config_rule in DEVICE_OC_DECONFIG_RULES:
-        action_set = ConfigActionEnum.Name(ConfigActionEnum.CONFIGACTION_SET)
-        config_rule = (action_set, config_rule['resource_key'], config_rule['resource_value'])
-        assert config_rule not in config_rules
-
-
-def test_device_openconfig_delete(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
-
-    device_client.DeleteDevice(DeviceId(**DEVICE_OC_ID))
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_OC_UUID, {})
-    assert driver is None
-
-
-# ----- Test Device Driver TAPI ------------------------------------------------
-
-def test_device_tapi_add_error_cases(
-    device_client : DeviceClient):      # pylint: disable=redefined-outer-name
-
-    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
-
-    with pytest.raises(grpc.RpcError) as e:
-        DEVICE_TAPI_WITH_EXTRA_RULES = copy.deepcopy(DEVICE_TAPI)
-        DEVICE_TAPI_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_TAPI_CONNECT_RULES)
-        DEVICE_TAPI_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_TAPI_CONFIG_RULES)
-        device_client.AddDevice(Device(**DEVICE_TAPI_WITH_EXTRA_RULES))
-    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
-    msg_head = 'device.device_config.config_rules(['
-    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\
-               'with "_connect/" tag. Others should be configured after adding the device.'
-    except_msg = str(e.value.details())
-    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
-
-
-def test_device_tapi_add_correct(
-    device_client: DeviceClient,        # pylint: disable=redefined-outer-name
-    device_service: DeviceService):     # pylint: disable=redefined-outer-name
-
-    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
-
-    DEVICE_TAPI_WITH_CONNECT_RULES = copy.deepcopy(DEVICE_TAPI)
-    DEVICE_TAPI_WITH_CONNECT_RULES['device_config']['config_rules'].extend(DEVICE_TAPI_CONNECT_RULES)
-    device_client.AddDevice(Device(**DEVICE_TAPI_WITH_CONNECT_RULES))
-    driver: _Driver = device_service.driver_instance_cache.get(DEVICE_TAPI_UUID)
-    assert driver is not None
-
-
-def test_device_tapi_get(
-    context_client: ContextClient,      # pylint: disable=redefined-outer-name
-    device_client: DeviceClient):       # pylint: disable=redefined-outer-name
-
-    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
-
-    initial_config = device_client.GetInitialConfig(DeviceId(**DEVICE_TAPI_ID))
-    LOGGER.info('initial_config = {:s}'.format(grpc_message_to_json_string(initial_config)))
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_TAPI_ID))
-    LOGGER.info('device_data = {:s}'.format(grpc_message_to_json_string(device_data)))
-
-
-def test_device_tapi_configure(
-    context_client: ContextClient,      # pylint: disable=redefined-outer-name
-    device_client: DeviceClient,        # pylint: disable=redefined-outer-name
-    device_service: DeviceService):     # pylint: disable=redefined-outer-name
-
-    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
-
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_TAPI_UUID)
-    assert driver is not None
-
-    # Requires to retrieve data from device; might be slow. Uncomment only when needed and test does not pass directly.
-    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-
-    DEVICE_TAPI_WITH_CONFIG_RULES = copy.deepcopy(DEVICE_TAPI)
-    DEVICE_TAPI_WITH_CONFIG_RULES['device_config']['config_rules'].extend(DEVICE_TAPI_CONFIG_RULES)
-    device_client.ConfigureDevice(Device(**DEVICE_TAPI_WITH_CONFIG_RULES))
-
-    # Requires to retrieve data from device; might be slow. Uncomment only when needed and test does not pass directly.
-    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_TAPI_ID))
-    config_rules = [
-        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
-        for config_rule in device_data.device_config.config_rules
-    ]
-    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
-        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
-    for config_rule in DEVICE_TAPI_CONFIG_RULES:
-        config_rule = (
-            ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'], config_rule['resource_value'])
-        assert config_rule in config_rules
-
-
-def test_device_tapi_deconfigure(
-    context_client: ContextClient,      # pylint: disable=redefined-outer-name
-    device_client: DeviceClient,        # pylint: disable=redefined-outer-name
-    device_service: DeviceService):     # pylint: disable=redefined-outer-name
-
-    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
-
-    driver: _Driver = device_service.driver_instance_cache.get(DEVICE_TAPI_UUID)
-    assert driver is not None
-
-    # Requires to retrieve data from device; might be slow. Uncomment only when needed and test does not pass directly.
-    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-
-    DEVICE_TAPI_WITH_DECONFIG_RULES = copy.deepcopy(DEVICE_TAPI)
-    DEVICE_TAPI_WITH_DECONFIG_RULES['device_config']['config_rules'].extend(DEVICE_TAPI_DECONFIG_RULES)
-    device_client.ConfigureDevice(Device(**DEVICE_TAPI_WITH_DECONFIG_RULES))
-
-    # Requires to retrieve data from device; might be slow. Uncomment only when needed and test does not pass directly.
-    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
-    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_TAPI_ID))
-    config_rules = [
-        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
-        for config_rule in device_data.device_config.config_rules
-    ]
-    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
-        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
-    for config_rule in DEVICE_TAPI_DECONFIG_RULES:
-        action_set = ConfigActionEnum.Name(ConfigActionEnum.CONFIGACTION_SET)
-        config_rule = (action_set, config_rule['resource_key'], config_rule['resource_value'])
-        assert config_rule not in config_rules
-
-
-def test_device_tapi_delete(
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
-
-    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
-
-    device_client.DeleteDevice(DeviceId(**DEVICE_TAPI_ID))
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_TAPI_UUID, {})
-    assert driver is None
-
-
-# ----- Test Device Driver P4 --------------------------------------------------
-
-def test_device_p4_add_error_cases(
-        context_client: ContextClient,   # pylint: disable=redefined-outer-name
-        device_client: DeviceClient,     # pylint: disable=redefined-outer-name
-        device_service: DeviceService):  # pylint: disable=redefined-outer-name
-
-    if not ENABLE_P4: pytest.skip(
-        'Skipping test: No P4 device has been configured')
-
-    with pytest.raises(grpc.RpcError) as e:
-        device_p4_with_extra_rules = copy.deepcopy(DEVICE_P4)
-        device_p4_with_extra_rules['device_config']['config_rules'].extend(
-            DEVICE_P4_CONNECT_RULES)
-        device_p4_with_extra_rules['device_config']['config_rules'].extend(
-            DEVICE_P4_CONFIG_RULES)
-        device_client.AddDevice(Device(**device_p4_with_extra_rules))
-    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
-    msg_head = 'device.device_config.config_rules(['
-    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\
-               'with "_connect/" tag. Others should be configured after adding the device.'
-    except_msg = str(e.value.details())
-    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
-
-
-def test_device_p4_add_correct(
-        context_client: ContextClient,              # pylint: disable=redefined-outer-name
-        device_client: DeviceClient,                # pylint: disable=redefined-outer-name
-        device_service: DeviceService,              # pylint: disable=redefined-outer-name
-        p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
-
-    if not ENABLE_P4: pytest.skip(
-        'Skipping test: No P4 device has been configured')
-
-    device_p4_with_connect_rules = copy.deepcopy(DEVICE_P4)
-    device_p4_with_connect_rules['device_config']['config_rules'].extend(
-        DEVICE_P4_CONNECT_RULES)
-    device_client.AddDevice(Device(**device_p4_with_connect_rules))
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_P4_UUID)
-    assert driver is not None
-
-
-def test_device_p4_get(
-        context_client: ContextClient,              # pylint: disable=redefined-outer-name
-        device_client: DeviceClient,                # pylint: disable=redefined-outer-name
-        device_service: DeviceService,              # pylint: disable=redefined-outer-name
-        p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
-
-    if not ENABLE_P4: pytest.skip(
-        'Skipping test: No P4 device has been configured')
-
-    initial_config = device_client.GetInitialConfig(DeviceId(**DEVICE_P4_ID))
-    LOGGER.info('initial_config = {:s}'.format(
-        grpc_message_to_json_string(initial_config)))
-
-    device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID))
-    LOGGER.info('device_data = {:s}'.format(
-        grpc_message_to_json_string(device_data)))
-
-
-def test_device_p4_configure(
-        context_client: ContextClient,              # pylint: disable=redefined-outer-name
-        device_client: DeviceClient,                # pylint: disable=redefined-outer-name
-        device_service: DeviceService,              # pylint: disable=redefined-outer-name
-        p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
-
-    if not ENABLE_P4: pytest.skip(
-        'Skipping test: No P4 device has been configured')
-
-    pytest.skip('Skipping test for unimplemented method')
-
-
-def test_device_p4_deconfigure(
-        context_client: ContextClient,              # pylint: disable=redefined-outer-name
-        device_client: DeviceClient,                # pylint: disable=redefined-outer-name
-        device_service: DeviceService,              # pylint: disable=redefined-outer-name
-        p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
-
-    if not ENABLE_P4: pytest.skip(
-        'Skipping test: No P4 device has been configured')
-
-    pytest.skip('Skipping test for unimplemented method')
-
-
-def test_device_p4_delete(
-        context_client: ContextClient,              # pylint: disable=redefined-outer-name
-        device_client: DeviceClient,                # pylint: disable=redefined-outer-name
-        device_service: DeviceService,              # pylint: disable=redefined-outer-name
-        p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
-
-    if not ENABLE_P4: pytest.skip('Skipping test: No P4 device has been configured')
-
-    device_client.DeleteDevice(DeviceId(**DEVICE_P4_ID))
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_P4_UUID)
-    assert driver is None
diff --git a/src/device/tests/test_unitary_emulated.py b/src/device/tests/test_unitary_emulated.py
new file mode 100644
index 0000000000000000000000000000000000000000..67a2e9c33c11711ed3343c688f7bc5a88316eca0
--- /dev/null
+++ b/src/device/tests/test_unitary_emulated.py
@@ -0,0 +1,387 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import calendar, copy, dateutil.parser, grpc, json, logging, operator, pytest, queue, time
+from datetime import datetime, timezone
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
+from context.client.ContextClient import ContextClient
+from context.proto.context_pb2 import DeviceId, DeviceOperationalStatusEnum
+from device.client.DeviceClient import DeviceClient
+from device.proto.context_pb2 import ConfigActionEnum, Device
+from device.proto.device_pb2 import MonitoringSettings
+from device.proto.kpi_sample_types_pb2 import KpiSampleType
+from device.service.DeviceService import DeviceService
+from device.service.driver_api._Driver import _Driver
+from .MockService_Dependencies import MockService_Dependencies
+from .PrepareTestScenario import ( # pylint: disable=unused-import
+    # be careful, order of symbols is important here!
+    mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment)
+
+from .Device_Emulated import (
+    DEVICE_EMU, DEVICE_EMU_CONFIG_ADDRESSES, DEVICE_EMU_CONFIG_ENDPOINTS, DEVICE_EMU_CONNECT_RULES,
+    DEVICE_EMU_DECONFIG_ADDRESSES, DEVICE_EMU_DECONFIG_ENDPOINTS, DEVICE_EMU_ENDPOINTS_COOKED, DEVICE_EMU_ID,
+    DEVICE_EMU_RECONFIG_ADDRESSES, DEVICE_EMU_UUID)
+
+logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
+logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
+logging.getLogger('monitoring-client').setLevel(logging.WARNING)
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+# ----- Test Device Driver Emulated --------------------------------------------
+# Device Driver Emulated tests are used to validate the Driver API as well as the Emulated Device Driver. Note that
+# other Drivers might support different sets of resource paths and attributes/values per resource; however, all of
+# them must implement the Driver API.
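+#
+# For illustration only (hypothetical resource values), a driver exposes its configuration as
+# (resource_key, resource_value) pairs, e.g.:
+#   ('/endpoints/endpoint[ep1]', {'uuid': 'ep1', 'type': 'optical'})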
+
+def test_device_emulated_add_error_cases(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    with pytest.raises(grpc.RpcError) as e:
+        DEVICE_EMU_WITH_ENDPOINTS = copy.deepcopy(DEVICE_EMU)
+        DEVICE_EMU_WITH_ENDPOINTS['device_endpoints'].append(json_endpoint(DEVICE_EMU_ID, 'ep-id', 'ep-type'))
+        device_client.AddDevice(Device(**DEVICE_EMU_WITH_ENDPOINTS))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    msg_head = 'device.device_endpoints(['
+    msg_tail = ']) is invalid; RPC method AddDevice does not accept Endpoints. '\
+               'Endpoints are discovered through interrogation of the physical device.'
+    except_msg = str(e.value.details())
+    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
+
+    with pytest.raises(grpc.RpcError) as e:
+        DEVICE_EMU_WITH_EXTRA_RULES = copy.deepcopy(DEVICE_EMU)
+        DEVICE_EMU_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_EMU_CONNECT_RULES)
+        DEVICE_EMU_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_EMU_CONFIG_ENDPOINTS)
+        device_client.AddDevice(Device(**DEVICE_EMU_WITH_EXTRA_RULES))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    msg_head = 'device.device_config.config_rules(['
+    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\
+               'with "_connect/" tag. Others should be configured after adding the device.'
+    except_msg = str(e.value.details())
+    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
+
+
+def test_device_emulated_add_correct(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    DEVICE_EMU_WITH_CONNECT_RULES = copy.deepcopy(DEVICE_EMU)
+    DEVICE_EMU_WITH_CONNECT_RULES['device_config']['config_rules'].extend(DEVICE_EMU_CONNECT_RULES)
+    device_client.AddDevice(Device(**DEVICE_EMU_WITH_CONNECT_RULES))
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_EMU_UUID) # we know the driver exists now
+    assert driver is not None
+
+
+def test_device_emulated_get(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    initial_config = device_client.GetInitialConfig(DeviceId(**DEVICE_EMU_ID))
+    LOGGER.info('initial_config = {:s}'.format(grpc_message_to_json_string(initial_config)))
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_EMU_ID))
+    LOGGER.info('device_data = {:s}'.format(grpc_message_to_json_string(device_data)))
+
+
+def test_device_emulated_configure(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_EMU_UUID) # we know the driver exists now
+    assert driver is not None
+
+    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+    assert len(driver_config) == len(DEVICE_EMU_ENDPOINTS_COOKED)
+    for endpoint_cooked in DEVICE_EMU_ENDPOINTS_COOKED:
+        assert endpoint_cooked in driver_config
+
+    DEVICE_EMU_WITH_CONFIG_RULES = copy.deepcopy(DEVICE_EMU)
+    DEVICE_EMU_WITH_CONFIG_RULES['device_config']['config_rules'].extend(DEVICE_EMU_CONFIG_ENDPOINTS)
+    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_CONFIG_RULES))
+
+    DEVICE_EMU_WITH_CONFIG_RULES = copy.deepcopy(DEVICE_EMU)
+    DEVICE_EMU_WITH_CONFIG_RULES['device_config']['config_rules'].extend(DEVICE_EMU_CONFIG_ADDRESSES)
+    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_CONFIG_RULES))
+
+    DEVICE_EMU_WITH_OPERATIONAL_STATUS = copy.deepcopy(DEVICE_EMU)
+    DEVICE_EMU_WITH_OPERATIONAL_STATUS['device_operational_status'] = \
+        DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_OPERATIONAL_STATUS))
+
+    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+    assert len(driver_config) == len(DEVICE_EMU_ENDPOINTS_COOKED) + len(DEVICE_EMU_CONFIG_ADDRESSES)
+    for endpoint_cooked in DEVICE_EMU_ENDPOINTS_COOKED:
+        endpoint_cooked = copy.deepcopy(endpoint_cooked)
+        endpoint_cooked[1]['enabled'] = True
+        assert endpoint_cooked in driver_config
+    for config_rule in DEVICE_EMU_CONFIG_ADDRESSES:
+        assert (config_rule['resource_key'], json.loads(config_rule['resource_value'])) in driver_config
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_EMU_ID))
+    assert device_data.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+
+    config_rules = [
+        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
+        for config_rule in device_data.device_config.config_rules
+    ]
+    #LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
+    #    '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
+    RESULTING_CONFIG_ENDPOINTS = {cr['resource_key']:cr for cr in copy.deepcopy(DEVICE_EMU_CONFIG_ENDPOINTS)}
+    for endpoint_cooked in DEVICE_EMU_ENDPOINTS_COOKED:
+        values = json.loads(RESULTING_CONFIG_ENDPOINTS[endpoint_cooked[0]]['resource_value'])
+        values.update(endpoint_cooked[1])
+        RESULTING_CONFIG_ENDPOINTS[endpoint_cooked[0]]['resource_value'] = json.dumps(values, sort_keys=True)
+    for config_rule in RESULTING_CONFIG_ENDPOINTS.values():
+        config_rule = (
+            ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'],
+            json.loads(json.dumps(config_rule['resource_value'])))
+        assert config_rule in config_rules
+    for config_rule in DEVICE_EMU_CONFIG_ADDRESSES:
+        config_rule = (
+            ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'],
+            json.loads(json.dumps(config_rule['resource_value'])))
+        assert config_rule in config_rules
+
+    # Try to reconfigure...
+
+    DEVICE_EMU_WITH_RECONFIG_RULES = copy.deepcopy(DEVICE_EMU)
+    DEVICE_EMU_WITH_RECONFIG_RULES['device_operational_status'] = \
+        DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+    DEVICE_EMU_WITH_RECONFIG_RULES['device_config']['config_rules'].extend(DEVICE_EMU_RECONFIG_ADDRESSES)
+    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_RECONFIG_RULES))
+
+    RESULTING_CONFIG_RULES = {cr['resource_key']:cr for cr in copy.deepcopy(DEVICE_EMU_CONFIG_ENDPOINTS)}
+    for endpoint_cooked in DEVICE_EMU_ENDPOINTS_COOKED:
+        values = json.loads(RESULTING_CONFIG_RULES[endpoint_cooked[0]]['resource_value'])
+        values.update(endpoint_cooked[1])
+        RESULTING_CONFIG_RULES[endpoint_cooked[0]]['resource_value'] = json.dumps(values, sort_keys=True)
+    RESULTING_CONFIG_RULES.update({cr['resource_key']:cr for cr in copy.deepcopy(DEVICE_EMU_CONFIG_ADDRESSES)})
+    for reconfig_rule in DEVICE_EMU_RECONFIG_ADDRESSES:
+        if reconfig_rule['action'] == ConfigActionEnum.CONFIGACTION_DELETE:
+            RESULTING_CONFIG_RULES.pop(reconfig_rule['resource_key'], None)
+        else:
+            RESULTING_CONFIG_RULES[reconfig_rule['resource_key']] = reconfig_rule
+    RESULTING_CONFIG_RULES = RESULTING_CONFIG_RULES.values()
+    #LOGGER.info('RESULTING_CONFIG_RULES = {:s}'.format(str(RESULTING_CONFIG_RULES)))
+
+    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    driver_config = json.loads(json.dumps(driver_config)) # JSON round-trip converts integer keys to strings so matching against string keys does not fail
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+    assert len(driver_config) == len(RESULTING_CONFIG_RULES)
+    for config_rule in RESULTING_CONFIG_RULES:
+        resource = [config_rule['resource_key'], json.loads(config_rule['resource_value'])]
+        assert resource in driver_config
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_EMU_ID))
+    config_rules = [
+        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
+        for config_rule in device_data.device_config.config_rules
+    ]
+    #LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
+    #    '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
+    for config_rule in RESULTING_CONFIG_RULES:
+        config_rule = (
+            ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'], config_rule['resource_value'])
+        assert config_rule in config_rules
+
+
+def test_device_emulated_monitor(
+    context_client : ContextClient,         # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,           # pylint: disable=redefined-outer-name
+    device_service : DeviceService,         # pylint: disable=redefined-outer-name
+    mock_service : MockService_Dependencies):   # pylint: disable=redefined-outer-name
+
+    device_uuid = DEVICE_EMU_UUID
+    json_device_id = DEVICE_EMU_ID
+    device_id = DeviceId(**json_device_id)
+    device_data = context_client.GetDevice(device_id)
+    LOGGER.info('device_data = \n{:s}'.format(str(device_data)))
+
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(device_uuid) # we know the driver exists now
+    assert driver is not None
+    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+    #assert len(driver_config) == len(DEVICE_EMU_ENDPOINTS_COOKED) + len(DEVICE_EMU_CONFIG_ADDRESSES)
+
+    SAMPLING_DURATION_SEC = 10.0
+    SAMPLING_INTERVAL_SEC = 2.0
+
+    MONITORING_SETTINGS_LIST = []
+    KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED = {}
+    for endpoint in device_data.device_endpoints:
+        endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+        for sample_type_id in endpoint.kpi_sample_types:
+            sample_type_name = str(KpiSampleType.Name(sample_type_id)).upper().replace('KPISAMPLETYPE_', '')
+            kpi_uuid = '{:s}-{:s}-{:s}-kpi_uuid'.format(device_uuid, endpoint_uuid, str(sample_type_id))
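+            # kpi_uuid takes the form '<device_uuid>-<endpoint_uuid>-<sample_type_id>-kpi_uuid'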
+            monitoring_settings = {
+                'kpi_id'        : {'kpi_id': {'uuid': kpi_uuid}},
+                'kpi_descriptor': {
+                    'kpi_description': 'Metric {:s} for Endpoint {:s} in Device {:s}'.format(
+                        sample_type_name, endpoint_uuid, device_uuid),
+                    'kpi_sample_type': sample_type_id,
+                    'device_id': json_device_id,
+                    'endpoint_id': json_endpoint_id(json_device_id, endpoint_uuid),
+                },
+                'sampling_duration_s': SAMPLING_DURATION_SEC,
+                'sampling_interval_s': SAMPLING_INTERVAL_SEC,
+            }
+            MONITORING_SETTINGS_LIST.append(monitoring_settings)
+            KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED[kpi_uuid] = 0
+
+    NUM_SAMPLES_EXPECTED_PER_KPI = SAMPLING_DURATION_SEC / SAMPLING_INTERVAL_SEC
+    NUM_SAMPLES_EXPECTED = len(MONITORING_SETTINGS_LIST) * NUM_SAMPLES_EXPECTED_PER_KPI
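+    # e.g., sampling for 10 s every 2 s yields 10.0 / 2.0 = 5 expected samples per KPI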
+
+    # Start monitoring the device
+    t_start_monitoring = datetime.timestamp(datetime.utcnow())
+    for monitoring_settings in MONITORING_SETTINGS_LIST:
+        device_client.MonitorDeviceKpi(MonitoringSettings(**monitoring_settings))
+
+    # wait to receive the expected number of samples
+    # if it takes more than 1.5 times the sampling duration, assume there is an error
+    time_ini = time.time()
+    queue_samples : queue.Queue = mock_service.queue_samples
+    received_samples = []
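+    # poll the queue with short timeouts so the 1.5x-duration deadline is re-checked frequently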
+    while (len(received_samples) < NUM_SAMPLES_EXPECTED) and (time.time() - time_ini < SAMPLING_DURATION_SEC * 1.5):
+        try:
+            received_sample = queue_samples.get(block=True, timeout=SAMPLING_INTERVAL_SEC / NUM_SAMPLES_EXPECTED)
+            #LOGGER.info('received_sample = {:s}'.format(str(received_sample)))
+            received_samples.append(received_sample)
+        except queue.Empty:
+            continue
+
+    t_end_monitoring = datetime.timestamp(datetime.utcnow())
+
+    #LOGGER.info('received_samples = {:s}'.format(str(received_samples)))
+    LOGGER.info('len(received_samples) = {:s}'.format(str(len(received_samples))))
+    LOGGER.info('NUM_SAMPLES_EXPECTED = {:s}'.format(str(NUM_SAMPLES_EXPECTED)))
+    assert len(received_samples) == NUM_SAMPLES_EXPECTED
+    for received_sample in received_samples:
+        kpi_uuid = received_sample.kpi_id.kpi_id.uuid
+        assert kpi_uuid in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED
+        assert isinstance(received_sample.timestamp, str)
+        try:
+            timestamp = float(received_sample.timestamp)
+        except ValueError:
+            dt_time = dateutil.parser.isoparse(received_sample.timestamp).replace(tzinfo=timezone.utc)
+            timestamp = float(calendar.timegm(dt_time.timetuple())) + (dt_time.microsecond / 1.e6)
+        assert timestamp > t_start_monitoring
+        assert timestamp < t_end_monitoring
+        assert received_sample.kpi_value.HasField('floatVal') or received_sample.kpi_value.HasField('intVal')
+        kpi_value = getattr(received_sample.kpi_value, received_sample.kpi_value.WhichOneof('value'))
+        assert isinstance(kpi_value, (float, int))
+        KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED[kpi_uuid] += 1
+
+    LOGGER.info('KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED = {:s}'.format(str(KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED)))
+    for kpi_uuid, num_samples_received in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED.items():
+        assert num_samples_received == NUM_SAMPLES_EXPECTED_PER_KPI
+
+    # Unsubscribe monitoring
+    for kpi_uuid in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED.keys():
+        MONITORING_SETTINGS_UNSUBSCRIBE = {
+            'kpi_id'             : {'kpi_id': {'uuid': kpi_uuid}},
+            'sampling_duration_s': -1, # a negative value in sampling_duration_s or sampling_interval_s means unsubscribe
+            'sampling_interval_s': -1, # kpi_id is mandatory to unsubscribe
+        }
+        device_client.MonitorDeviceKpi(MonitoringSettings(**MONITORING_SETTINGS_UNSUBSCRIBE))
+
+
+def test_device_emulated_deconfigure(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_EMU_UUID) # we know the driver exists now
+    assert driver is not None
+
+    driver_config = driver.GetConfig()
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+
+    DEVICE_EMU_WITH_DECONFIG_RULES = copy.deepcopy(DEVICE_EMU)
+    DEVICE_EMU_WITH_DECONFIG_RULES['device_operational_status'] = \
+        DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
+    DEVICE_EMU_WITH_DECONFIG_RULES['device_config']['config_rules'].extend(DEVICE_EMU_DECONFIG_ADDRESSES)
+    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_DECONFIG_RULES))
+
+    RESULTING_CONFIG_RULES = {cr['resource_key']:cr for cr in copy.deepcopy(DEVICE_EMU_CONFIG_ENDPOINTS)}
+    for endpoint_cooked in DEVICE_EMU_ENDPOINTS_COOKED:
+        values = json.loads(RESULTING_CONFIG_RULES[endpoint_cooked[0]]['resource_value'])
+        values.update(endpoint_cooked[1])
+        RESULTING_CONFIG_RULES[endpoint_cooked[0]]['resource_value'] = json.dumps(values, sort_keys=True)
+    RESULTING_CONFIG_RULES = RESULTING_CONFIG_RULES.values()
+    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    driver_config = json.loads(json.dumps(driver_config)) # JSON round-trip converts integer keys to strings so matching against string keys does not fail
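+    # filter out any 'do_sampling (trigger: ...)' entries potentially left over from the monitoring test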
+    driver_config = list(filter(
+        lambda config_rule: (
+            not isinstance(config_rule[1], str) or not config_rule[1].startswith('do_sampling (trigger:')),
+        driver_config))
+    LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+    LOGGER.info('RESULTING_CONFIG_RULES = {:s}'.format(str(RESULTING_CONFIG_RULES)))
+    assert len(driver_config) == len(RESULTING_CONFIG_RULES)
+    for config_rule in RESULTING_CONFIG_RULES:
+        config_rule = [config_rule['resource_key'], json.loads(config_rule['resource_value'])]
+        #LOGGER.info('config_rule = {:s}'.format(str(config_rule)))
+        assert config_rule in driver_config
+
+    DEVICE_EMU_WITH_DECONFIG_RULES = copy.deepcopy(DEVICE_EMU)
+    DEVICE_EMU_WITH_DECONFIG_RULES['device_config']['config_rules'].extend(DEVICE_EMU_DECONFIG_ENDPOINTS)
+    device_client.ConfigureDevice(Device(**DEVICE_EMU_WITH_DECONFIG_RULES))
+
+    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    driver_config = json.loads(json.dumps(driver_config)) # prevent integer keys to fail matching with string keys
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+    assert len(driver_config) == 0
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_EMU_ID))
+    config_rules = device_data.device_config.config_rules
+    LOGGER.info('config_rules = {:s}'.format(str(config_rules)))
+    clean_config_rules = []
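+    # keep only rules that are neither endpoint definitions nor leftover 'do_sampling' triggers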
+    for config_rule in config_rules:
+        if config_rule.resource_key.startswith('/endpoints/endpoint'): continue
+        config_rule_value = json.loads(config_rule.resource_value)
+        if isinstance(config_rule_value, str) and config_rule_value.startswith('do_sampling (trigger:'): continue
+        clean_config_rules.append(config_rule)
+    LOGGER.info('clean_config_rules = {:s}'.format(str(clean_config_rules)))
+    assert len(clean_config_rules) == 0
+
+
+def test_device_emulated_delete(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    device_client.DeleteDevice(DeviceId(**DEVICE_EMU_ID))
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_EMU_UUID, {})
+    assert driver is None
diff --git a/src/device/tests/test_unitary_microwave.py b/src/device/tests/test_unitary_microwave.py
index 8718d99fb9d9d430e49ecd44edc60e5a2e9be339..c5cd70b993971812cd89dd970e9835cfd04a548f 100644
--- a/src/device/tests/test_unitary_microwave.py
+++ b/src/device/tests/test_unitary_microwave.py
@@ -12,211 +12,61 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import calendar, copy, dateutil.parser, grpc, json, logging, operator, os, pytest, queue, time
-from datetime import datetime, timezone
-from typing import Tuple
-from common.orm.Database import Database
-from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum
-from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
-from common.message_broker.MessageBroker import MessageBroker
+import copy, grpc, logging, pytest
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
-from context.Config import (
-    GRPC_SERVICE_PORT as CONTEXT_GRPC_SERVICE_PORT, GRPC_MAX_WORKERS as CONTEXT_GRPC_MAX_WORKERS,
-    GRPC_GRACE_PERIOD as CONTEXT_GRPC_GRACE_PERIOD)
 from context.client.ContextClient import ContextClient
-from context.proto.context_pb2 import DeviceId, DeviceOperationalStatusEnum
-from context.service.grpc_server.ContextService import ContextService
-from device.Config import (
-    GRPC_SERVICE_PORT as DEVICE_GRPC_SERVICE_PORT, GRPC_MAX_WORKERS as DEVICE_GRPC_MAX_WORKERS,
-    GRPC_GRACE_PERIOD as DEVICE_GRPC_GRACE_PERIOD)
+from context.proto.context_pb2 import DeviceId
 from device.client.DeviceClient import DeviceClient
-from device.proto.context_pb2 import ConfigActionEnum, Context, Device, Topology
-from device.proto.device_pb2 import MonitoringSettings
-from device.proto.kpi_sample_types_pb2 import KpiSampleType
+from device.proto.context_pb2 import ConfigActionEnum, Device
 from device.service.DeviceService import DeviceService
 from device.service.driver_api._Driver import _Driver
-from device.service.driver_api.DriverFactory import DriverFactory
-from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
-from device.service.drivers import DRIVERS
-from device.tests.MockMonitoringService import MockMonitoringService
-from monitoring.Config import (
-    GRPC_SERVICE_PORT as MONITORING_GRPC_SERVICE_PORT, GRPC_MAX_WORKERS as MONITORING_GRPC_MAX_WORKERS,
-    GRPC_GRACE_PERIOD as MONITORING_GRPC_GRACE_PERIOD)
-from monitoring.client.monitoring_client import MonitoringClient
-from .CommonObjects import CONTEXT, TOPOLOGY
-
-from .Device_Emulated import (
-    DEVICE_EMU, DEVICE_EMU_CONFIG_ADDRESSES, DEVICE_EMU_CONFIG_ENDPOINTS, DEVICE_EMU_CONNECT_RULES,
-    DEVICE_EMU_DECONFIG_ADDRESSES, DEVICE_EMU_DECONFIG_ENDPOINTS, DEVICE_EMU_EP_DESCS, DEVICE_EMU_ENDPOINTS_COOKED,
-    DEVICE_EMU_ID, DEVICE_EMU_RECONFIG_ADDRESSES, DEVICE_EMU_UUID)
-ENABLE_EMULATED = True
-
-try:
-    from .Device_OpenConfig_Infinera1 import(
-    #from .Device_OpenConfig_Infinera2 import(
-        DEVICE_OC, DEVICE_OC_CONFIG_RULES, DEVICE_OC_DECONFIG_RULES, DEVICE_OC_CONNECT_RULES, DEVICE_OC_ID,
-        DEVICE_OC_UUID)
-    ENABLE_OPENCONFIG = True
-except ImportError:
-    ENABLE_OPENCONFIG = False
-
-try:
-    from .Device_Transport_Api_CTTC import (
-        DEVICE_TAPI, DEVICE_TAPI_CONNECT_RULES, DEVICE_TAPI_UUID, DEVICE_TAPI_ID, DEVICE_TAPI_CONFIG_RULES,
-        DEVICE_TAPI_DECONFIG_RULES)
-    ENABLE_TAPI = True
-except ImportError:
-    ENABLE_TAPI = False
+from .PrepareTestScenario import ( # pylint: disable=unused-import
+    # be careful, order of symbols is important here!
+    mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment)
 
 try:
     from .Device_Microwave_Template import (
-        DEVICE_MICROWAVE, DEVICE_MICROWAVE_CONNECT_RULES, DEVICE_MICROWAVE_UUID, DEVICE_MICROWAVE_ID, DEVICE_MICROWAVE_CONFIG_RULES,
-        DEVICE_MICROWAVE_DECONFIG_RULES)
+        DEVICE_MICROWAVE, DEVICE_MICROWAVE_CONNECT_RULES, DEVICE_MICROWAVE_UUID, DEVICE_MICROWAVE_ID,
+        DEVICE_MICROWAVE_CONFIG_RULES, DEVICE_MICROWAVE_DECONFIG_RULES)
     ENABLE_MICROWAVE = True
-except ImportError as error:
-    ENABLE_MICROWAVE = False
-    print(error.__class__.__name__ + ": " + error.message)
-
-from .mock_p4runtime_service import MockP4RuntimeService
-try:
-    from .device_p4 import(
-        DEVICE_P4, DEVICE_P4_ID, DEVICE_P4_UUID, DEVICE_P4_NAME, DEVICE_P4_ADDRESS, DEVICE_P4_PORT, DEVICE_P4_WORKERS,
-        DEVICE_P4_GRACE_PERIOD, DEVICE_P4_CONNECT_RULES, DEVICE_P4_CONFIG_RULES)
-    ENABLE_P4 = True
 except ImportError:
-    ENABLE_P4 = False
-
-#ENABLE_EMULATED   = False # set to False to disable tests of Emulated devices
-#ENABLE_OPENCONFIG = False # set to False to disable tests of OpenConfig devices
-#ENABLE_TAPI       = False # set to False to disable tests of TAPI devices
-#ENABLE_P4         = False # set to False to disable tests of P4 devices
-
-ENABLE_OPENCONFIG_CONFIGURE   = True
-ENABLE_OPENCONFIG_MONITOR     = True
-ENABLE_OPENCONFIG_DECONFIGURE = True
-
-
-logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
-logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
-logging.getLogger('monitoring-client').setLevel(logging.WARNING)
+    ENABLE_MICROWAVE = False
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-CONTEXT_GRPC_SERVICE_PORT = 10000 + CONTEXT_GRPC_SERVICE_PORT # avoid privileged ports
-DEVICE_GRPC_SERVICE_PORT = 10000 + DEVICE_GRPC_SERVICE_PORT # avoid privileged ports
-MONITORING_GRPC_SERVICE_PORT = 10000 + MONITORING_GRPC_SERVICE_PORT # avoid privileged ports
-
-DEFAULT_REDIS_SERVICE_HOST = '127.0.0.1'
-DEFAULT_REDIS_SERVICE_PORT = 6379
-DEFAULT_REDIS_DATABASE_ID  = 0
-
-REDIS_CONFIG = {
-    'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST),
-    'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT),
-    'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID',  DEFAULT_REDIS_DATABASE_ID ),
-}
-
-SCENARIOS = [
-    ('all_inmemory', DatabaseBackendEnum.INMEMORY, {},           MessageBrokerBackendEnum.INMEMORY, {}          ),
-    #('all_redis',    DatabaseBackendEnum.REDIS,    REDIS_CONFIG, MessageBrokerBackendEnum.REDIS,    REDIS_CONFIG),
-]
-
-@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS)
-def context_db_mb(request) -> Tuple[Database, MessageBroker]:
-    name,db_backend,db_settings,mb_backend,mb_settings = request.param
-    msg = 'Running scenario {:s} db_backend={:s}, db_settings={:s}, mb_backend={:s}, mb_settings={:s}...'
-    LOGGER.info(msg.format(str(name), str(db_backend.value), str(db_settings), str(mb_backend.value), str(mb_settings)))
-    _database = Database(get_database_backend(backend=db_backend, **db_settings))
-    _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings))
-    yield _database, _message_broker
-    _message_broker.terminate()
-
-@pytest.fixture(scope='session')
-def context_service(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name
-    _service = ContextService(
-        context_db_mb[0], context_db_mb[1], port=CONTEXT_GRPC_SERVICE_PORT, max_workers=CONTEXT_GRPC_MAX_WORKERS,
-        grace_period=CONTEXT_GRPC_GRACE_PERIOD)
-    _service.start()
-    yield _service
-    _service.stop()
-
-@pytest.fixture(scope='session')
-def context_client(context_service : ContextService): # pylint: disable=redefined-outer-name
-    _client = ContextClient(address='127.0.0.1', port=CONTEXT_GRPC_SERVICE_PORT)
-    yield _client
-    _client.close()
-
-@pytest.fixture(scope='session')
-def monitoring_service():
-    _service = MockMonitoringService(port=MONITORING_GRPC_SERVICE_PORT, max_workers=MONITORING_GRPC_MAX_WORKERS,
-        grace_period=MONITORING_GRPC_GRACE_PERIOD)
-    _service.start()
-    yield _service
-    _service.stop()
-
-@pytest.fixture(scope='session')
-def monitoring_client(monitoring_service : MockMonitoringService): # pylint: disable=redefined-outer-name
-    _client = MonitoringClient(server='127.0.0.1', port=MONITORING_GRPC_SERVICE_PORT)
-    #yield _client
-    #_client.close()
-    return _client
-
-@pytest.fixture(scope='session')
-def device_service(
-    context_client : ContextClient,         # pylint: disable=redefined-outer-name
-    monitoring_client : MonitoringClient):  # pylint: disable=redefined-outer-name
-
-    _driver_factory = DriverFactory(DRIVERS)
-    _driver_instance_cache = DriverInstanceCache(_driver_factory)
-    _service = DeviceService(
-        context_client, monitoring_client, _driver_instance_cache, port=DEVICE_GRPC_SERVICE_PORT,
-        max_workers=DEVICE_GRPC_MAX_WORKERS, grace_period=DEVICE_GRPC_GRACE_PERIOD)
-    _service.start()
-    yield _service
-    _service.stop()
-
-@pytest.fixture(scope='session')
-def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name
-    _client = DeviceClient(address='127.0.0.1', port=DEVICE_GRPC_SERVICE_PORT)
-    yield _client
-    _client.close()
-
-@pytest.fixture(scope='session')
-def p4runtime_service():
-    _service = MockP4RuntimeService(
-        address=DEVICE_P4_ADDRESS, port=DEVICE_P4_PORT,
-        max_workers=DEVICE_P4_WORKERS,
-        grace_period=DEVICE_P4_GRACE_PERIOD)
-    _service.start()
-    yield _service
-    _service.stop()
-
-
-def test_prepare_environment(
-    context_client : ContextClient,     # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
-    device_service : DeviceService):    # pylint: disable=redefined-outer-name
 
-    context_client.SetContext(Context(**CONTEXT))
-    context_client.SetTopology(Topology(**TOPOLOGY))
+# ----- Test Device Driver Microwave ------------------------------------------------
 
+def test_device_microwave_add_error_cases(
+    device_client : DeviceClient):      # pylint: disable=redefined-outer-name
 
+    if not ENABLE_MICROWAVE: pytest.skip('Skipping test: No MICROWAVE device has been configured')
 
+    with pytest.raises(grpc.RpcError) as e:
+        DEVICE_MICROWAVE_WITH_EXTRA_RULES = copy.deepcopy(DEVICE_MICROWAVE)
+        DEVICE_MICROWAVE_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_MICROWAVE_CONNECT_RULES)
+        DEVICE_MICROWAVE_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_MICROWAVE_CONFIG_RULES)
+        device_client.AddDevice(Device(**DEVICE_MICROWAVE_WITH_EXTRA_RULES))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    msg_head = 'device.device_config.config_rules(['
+    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\
+               'with "_connect/" tag. Others should be configured after adding the device.'
+    except_msg = str(e.value.details())
+    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
 
-# ----- Test Device Driver Microwave ------------------------------------------------
 
 def test_device_microwave_add_correct(
     device_client: DeviceClient,        # pylint: disable=redefined-outer-name
     device_service: DeviceService):     # pylint: disable=redefined-outer-name
 
     if not ENABLE_MICROWAVE: pytest.skip('Skipping test: No MICROWAVE device has been configured')
-    
+
     DEVICE_MICROWAVE_WITH_CONNECT_RULES = copy.deepcopy(DEVICE_MICROWAVE)
     DEVICE_MICROWAVE_WITH_CONNECT_RULES['device_config']['config_rules'].extend(DEVICE_MICROWAVE_CONNECT_RULES)
     device_client.AddDevice(Device(**DEVICE_MICROWAVE_WITH_CONNECT_RULES))
-    driver: _Driver = device_service.driver_instance_cache.get(DEVICE_MICROWAVE_UUID)
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver: _Driver = driver_instance_cache.get(DEVICE_MICROWAVE_UUID)
     assert driver is not None
 
 
@@ -240,7 +90,8 @@ def test_device_microwave_configure(
 
     if not ENABLE_MICROWAVE: pytest.skip('Skipping test: No MICROWAVE device has been configured')
 
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_MICROWAVE_UUID)
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_MICROWAVE_UUID)
     assert driver is not None
 
     # Requires to retrieve data from device; might be slow. Uncomment only when needed and test does not pass directly.
@@ -263,8 +114,6 @@ def test_device_microwave_configure(
     LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
         '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
     for config_rule in DEVICE_MICROWAVE_CONFIG_RULES:
-        #import pdb;
-        #pdb. set_trace()
         config_rule = (
             ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'], config_rule['resource_value'])
         assert config_rule in config_rules
@@ -277,7 +126,8 @@ def test_device_microwave_deconfigure(
 
     if not ENABLE_MICROWAVE: pytest.skip('Skipping test: No MICROWAVE device has been configured')
 
-    driver: _Driver = device_service.driver_instance_cache.get(DEVICE_MICROWAVE_UUID)
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver: _Driver = driver_instance_cache.get(DEVICE_MICROWAVE_UUID)
     assert driver is not None
 
     # Requires to retrieve data from device; might be slow. Uncomment only when needed and test does not pass directly.
@@ -312,5 +162,6 @@ def test_device_microwave_delete(
     if not ENABLE_MICROWAVE: pytest.skip('Skipping test: No MICROWAVE device has been configured')
 
     device_client.DeleteDevice(DeviceId(**DEVICE_MICROWAVE_ID))
-    driver : _Driver = device_service.driver_instance_cache.get(DEVICE_MICROWAVE_UUID, {})
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_MICROWAVE_UUID, {})
     assert driver is None
\ No newline at end of file
diff --git a/src/device/tests/test_unitary_openconfig.py b/src/device/tests/test_unitary_openconfig.py
new file mode 100644
index 0000000000000000000000000000000000000000..968272c04a1304b313d8a988ce0a432e3749a9b8
--- /dev/null
+++ b/src/device/tests/test_unitary_openconfig.py
@@ -0,0 +1,300 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import calendar, copy, dateutil.parser, grpc, logging, pytest, queue, time
+from datetime import datetime, timezone
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.EndPoint import json_endpoint_id
+from context.client.ContextClient import ContextClient
+from context.proto.context_pb2 import DeviceId
+from device.client.DeviceClient import DeviceClient
+from device.proto.context_pb2 import ConfigActionEnum, Device
+from device.proto.device_pb2 import MonitoringSettings
+from device.proto.kpi_sample_types_pb2 import KpiSampleType
+from device.service.DeviceService import DeviceService
+from device.service.driver_api._Driver import _Driver
+from .MockService_Dependencies import MockService_Dependencies
+from .PrepareTestScenario import ( # pylint: disable=unused-import
+    # be careful, order of symbols is important here!
+    mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment)
+
+try:
+    from .Device_OpenConfig_Infinera1 import (
+    #from .Device_OpenConfig_Infinera2 import (
+        DEVICE_OC, DEVICE_OC_CONFIG_RULES, DEVICE_OC_DECONFIG_RULES, DEVICE_OC_CONNECT_RULES, DEVICE_OC_ID,
+        DEVICE_OC_UUID)
+    ENABLE_OPENCONFIG = True
+except ImportError:
+    ENABLE_OPENCONFIG = False
+
+ENABLE_OPENCONFIG_CONFIGURE   = True
+ENABLE_OPENCONFIG_MONITOR     = True
+ENABLE_OPENCONFIG_DECONFIGURE = True
+
+
+logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
+logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
+logging.getLogger('monitoring-client').setLevel(logging.WARNING)
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+# ----- Test Device Driver OpenConfig ------------------------------------------
+
+def test_device_openconfig_add_error_cases(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
+
+    with pytest.raises(grpc.RpcError) as e:
+        DEVICE_OC_WITH_EXTRA_RULES = copy.deepcopy(DEVICE_OC)
+        DEVICE_OC_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_OC_CONNECT_RULES)
+        DEVICE_OC_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_OC_CONFIG_RULES)
+        device_client.AddDevice(Device(**DEVICE_OC_WITH_EXTRA_RULES))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    msg_head = 'device.device_config.config_rules(['
+    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\
+               'with "_connect/" tag. Others should be configured after adding the device.'
+    except_msg = str(e.value.details())
+    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
+
+
+def test_device_openconfig_add_correct(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
+
+    DEVICE_OC_WITH_CONNECT_RULES = copy.deepcopy(DEVICE_OC)
+    DEVICE_OC_WITH_CONNECT_RULES['device_config']['config_rules'].extend(DEVICE_OC_CONNECT_RULES)
+    device_client.AddDevice(Device(**DEVICE_OC_WITH_CONNECT_RULES))
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_OC_UUID) # we know the driver exists now
+    assert driver is not None
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_OC_ID))
+    config_rules = [
+        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
+        for config_rule in device_data.device_config.config_rules
+    ]
+    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
+        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
+
+
+def test_device_openconfig_get(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
+
+    initial_config = device_client.GetInitialConfig(DeviceId(**DEVICE_OC_ID))
+    LOGGER.info('initial_config = {:s}'.format(grpc_message_to_json_string(initial_config)))
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_OC_ID))
+    LOGGER.info('device_data = {:s}'.format(grpc_message_to_json_string(device_data)))
+
+
+def test_device_openconfig_configure(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
+    if not ENABLE_OPENCONFIG_CONFIGURE: pytest.skip('Skipping test: OpenConfig configure tests are disabled')
+
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_OC_UUID) # we know the driver exists now
+    assert driver is not None
+
+    # Requires retrieving data from the device; might be slow. Uncomment only when needed, i.e., when the test does not pass directly.
+    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+
+    DEVICE_OC_WITH_CONFIG_RULES = copy.deepcopy(DEVICE_OC)
+    DEVICE_OC_WITH_CONFIG_RULES['device_config']['config_rules'].extend(DEVICE_OC_CONFIG_RULES)
+    device_client.ConfigureDevice(Device(**DEVICE_OC_WITH_CONFIG_RULES))
+
+    # Requires retrieving data from the device; might be slow. Uncomment only when needed, i.e., when the test does not pass directly.
+    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_OC_ID))
+    config_rules = [
+        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
+        for config_rule in device_data.device_config.config_rules
+    ]
+    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
+        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
+    for config_rule in DEVICE_OC_CONFIG_RULES:
+        config_rule = (
+            ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'], config_rule['resource_value'])
+        assert config_rule in config_rules
+
+
+def test_device_openconfig_monitor(
+    context_client : ContextClient,             # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,               # pylint: disable=redefined-outer-name
+    device_service : DeviceService,             # pylint: disable=redefined-outer-name
+    mock_service : MockService_Dependencies):   # pylint: disable=redefined-outer-name
+
+    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
+    if not ENABLE_OPENCONFIG_MONITOR: pytest.skip('Skipping test: OpenConfig monitor tests are disabled')
+
+    device_uuid = DEVICE_OC_UUID
+    json_device_id = DEVICE_OC_ID
+    device_id = DeviceId(**json_device_id)
+    device_data = context_client.GetDevice(device_id)
+    #LOGGER.info('device_data = \n{:s}'.format(str(device_data)))
+
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(device_uuid) # we know the driver exists now
+    assert driver is not None
+
+    SAMPLING_DURATION_SEC = 60.0
+    SAMPLING_INTERVAL_SEC = 15.0
+
+    MONITORING_SETTINGS_LIST = []
+    KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED = {}
+    for endpoint in device_data.device_endpoints:
+        endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+        for sample_type_id in endpoint.kpi_sample_types:
+            sample_type_name = str(KpiSampleType.Name(sample_type_id)).upper().replace('KPISAMPLETYPE_', '')
+            kpi_uuid = '{:s}-{:s}-{:s}-kpi_uuid'.format(device_uuid, endpoint_uuid, str(sample_type_id))
+            monitoring_settings = {
+                'kpi_id'        : {'kpi_id': {'uuid': kpi_uuid}},
+                'kpi_descriptor': {
+                    'kpi_description': 'Metric {:s} for Endpoint {:s} in Device {:s}'.format(
+                        sample_type_name, endpoint_uuid, device_uuid),
+                    'kpi_sample_type': sample_type_id,
+                    'device_id': json_device_id,
+                    'endpoint_id': json_endpoint_id(json_device_id, endpoint_uuid),
+                },
+                'sampling_duration_s': SAMPLING_DURATION_SEC,
+                'sampling_interval_s': SAMPLING_INTERVAL_SEC,
+            }
+            MONITORING_SETTINGS_LIST.append(monitoring_settings)
+            KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED[kpi_uuid] = 0
+
+    NUM_SAMPLES_EXPECTED_PER_KPI = SAMPLING_DURATION_SEC / SAMPLING_INTERVAL_SEC
+    NUM_SAMPLES_EXPECTED = len(MONITORING_SETTINGS_LIST) * NUM_SAMPLES_EXPECTED_PER_KPI
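+    # e.g., sampling for 60 s every 15 s yields 60.0 / 15.0 = 4 expected samples per KPI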
+
+    # Start monitoring the device
+    t_start_monitoring = datetime.timestamp(datetime.utcnow())
+    for monitoring_settings in MONITORING_SETTINGS_LIST:
+        device_client.MonitorDeviceKpi(MonitoringSettings(**monitoring_settings))
+
+    # wait to receive the expected number of samples
+    # if it takes more than 1.5 times the sampling duration, assume there is an error
+    time_ini = time.time()
+    queue_samples : queue.Queue = mock_service.queue_samples
+    received_samples = []
+    while (len(received_samples) < NUM_SAMPLES_EXPECTED) and (time.time() - time_ini < SAMPLING_DURATION_SEC * 1.5):
+        try:
+            received_sample = queue_samples.get(block=True, timeout=SAMPLING_INTERVAL_SEC / NUM_SAMPLES_EXPECTED)
+            #LOGGER.info('received_sample = {:s}'.format(str(received_sample)))
+            received_samples.append(received_sample)
+        except queue.Empty:
+            continue
+
+    t_end_monitoring = datetime.timestamp(datetime.utcnow())
+
+    #LOGGER.info('received_samples = {:s}'.format(str(received_samples)))
+    LOGGER.info('len(received_samples) = {:s}'.format(str(len(received_samples))))
+    LOGGER.info('NUM_SAMPLES_EXPECTED = {:s}'.format(str(NUM_SAMPLES_EXPECTED)))
+    #assert len(received_samples) == NUM_SAMPLES_EXPECTED
+    for received_sample in received_samples:
+        kpi_uuid = received_sample.kpi_id.kpi_id.uuid
+        assert kpi_uuid in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED
+        assert isinstance(received_sample.timestamp, str)
+        try:
+            timestamp = float(received_sample.timestamp)
+        except ValueError:
+            dt_time = dateutil.parser.isoparse(received_sample.timestamp).replace(tzinfo=timezone.utc)
+            timestamp = float(calendar.timegm(dt_time.timetuple())) + (dt_time.microsecond / 1.e6)
+        assert timestamp > t_start_monitoring
+        assert timestamp < t_end_monitoring
+        assert received_sample.kpi_value.HasField('floatVal') or received_sample.kpi_value.HasField('intVal')
+        kpi_value = getattr(received_sample.kpi_value, received_sample.kpi_value.WhichOneof('value'))
+        assert isinstance(kpi_value, (float, int))
+        KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED[kpi_uuid] += 1
+
+    LOGGER.info('KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED = {:s}'.format(str(KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED)))
+    # TODO: review why num_samples_received per KPI != NUM_SAMPLES_EXPECTED_PER_KPI
+    #for kpi_uuid, num_samples_received in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED.items():
+    #    assert num_samples_received == NUM_SAMPLES_EXPECTED_PER_KPI
+
+    # Unsubscribe monitoring
+    for kpi_uuid in KPI_UUIDS__TO__NUM_SAMPLES_RECEIVED:
+        MONITORING_SETTINGS_UNSUBSCRIBE = {
+            'kpi_id'             : {'kpi_id': {'uuid': kpi_uuid}},
+            'sampling_duration_s': -1, # a negative value in sampling_duration_s or sampling_interval_s means unsubscribe
+            'sampling_interval_s': -1, # kpi_id is mandatory to unsubscribe
+        }
+        device_client.MonitorDeviceKpi(MonitoringSettings(**MONITORING_SETTINGS_UNSUBSCRIBE))
+
+
+def test_device_openconfig_deconfigure(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
+    if not ENABLE_OPENCONFIG_DECONFIGURE: pytest.skip('Skipping test: OpenConfig deconfigure tests are disabled')
+
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_OC_UUID) # we know the driver exists now
+    assert driver is not None
+
+    # Requires retrieving data from the device; might be slow. Uncomment only when needed, i.e., when the test does not pass directly.
+    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+
+    DEVICE_OC_WITH_DECONFIG_RULES = copy.deepcopy(DEVICE_OC)
+    DEVICE_OC_WITH_DECONFIG_RULES['device_config']['config_rules'].extend(DEVICE_OC_DECONFIG_RULES)
+    device_client.ConfigureDevice(Device(**DEVICE_OC_WITH_DECONFIG_RULES))
+
+    # Requires retrieving data from the device; might be slow. Uncomment only when needed, i.e., when the test does not pass directly.
+    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_OC_ID))
+    config_rules = [
+        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
+        for config_rule in device_data.device_config.config_rules
+    ]
+    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
+        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
+    for config_rule in DEVICE_OC_DECONFIG_RULES:
+        action_set = ConfigActionEnum.Name(ConfigActionEnum.CONFIGACTION_SET)
+        config_rule = (action_set, config_rule['resource_key'], config_rule['resource_value'])
+        assert config_rule not in config_rules
+
+
+def test_device_openconfig_delete(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    if not ENABLE_OPENCONFIG: pytest.skip('Skipping test: No OpenConfig device has been configured')
+
+    device_client.DeleteDevice(DeviceId(**DEVICE_OC_ID))
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_OC_UUID, {})
+    assert driver is None
diff --git a/src/device/tests/test_unitary_p4.py b/src/device/tests/test_unitary_p4.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8a5d37b8f01685f10ab2d2ec967d09312fc4cc4
--- /dev/null
+++ b/src/device/tests/test_unitary_p4.py
@@ -0,0 +1,148 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, logging, pytest
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from context.proto.context_pb2 import DeviceId
+from device.client.DeviceClient import DeviceClient
+from device.proto.context_pb2 import Device
+from device.service.DeviceService import DeviceService
+from device.service.driver_api._Driver import _Driver
+from .PrepareTestScenario import ( # pylint: disable=unused-import
+    # be careful, order of symbols is important here!
+    mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment)
+
+from .mock_p4runtime_service import MockP4RuntimeService
+try:
+    from .device_p4 import (
+        DEVICE_P4, DEVICE_P4_ID, DEVICE_P4_UUID, DEVICE_P4_ADDRESS, DEVICE_P4_PORT, DEVICE_P4_WORKERS,
+        DEVICE_P4_GRACE_PERIOD, DEVICE_P4_CONNECT_RULES, DEVICE_P4_CONFIG_RULES)
+    ENABLE_P4 = True
+except ImportError:
+    ENABLE_P4 = False
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
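+# The MockP4RuntimeService fixture below stands in for a real P4Runtime-capable device, so the
+# P4 driver tests can run without hardware (connection parameters come from .device_p4).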
+@pytest.fixture(scope='session')
+def p4runtime_service():
+    _service = MockP4RuntimeService(
+        address=DEVICE_P4_ADDRESS, port=DEVICE_P4_PORT,
+        max_workers=DEVICE_P4_WORKERS,
+        grace_period=DEVICE_P4_GRACE_PERIOD)
+    _service.start()
+    yield _service
+    _service.stop()
+
+
+# ----- Test Device Driver P4 --------------------------------------------------
+
+def test_device_p4_add_error_cases(
+        context_client: ContextClient,   # pylint: disable=redefined-outer-name
+        device_client: DeviceClient,     # pylint: disable=redefined-outer-name
+        device_service: DeviceService):  # pylint: disable=redefined-outer-name
+
+    if not ENABLE_P4: pytest.skip(
+        'Skipping test: No P4 device has been configured')
+
+    with pytest.raises(grpc.RpcError) as e:
+        device_p4_with_extra_rules = copy.deepcopy(DEVICE_P4)
+        device_p4_with_extra_rules['device_config']['config_rules'].extend(
+            DEVICE_P4_CONNECT_RULES)
+        device_p4_with_extra_rules['device_config']['config_rules'].extend(
+            DEVICE_P4_CONFIG_RULES)
+        device_client.AddDevice(Device(**device_p4_with_extra_rules))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    msg_head = 'device.device_config.config_rules(['
+    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\
+               'with "_connect/" tag. Others should be configured after adding the device.'
+    except_msg = str(e.value.details())
+    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
+
+
+def test_device_p4_add_correct(
+        context_client: ContextClient,              # pylint: disable=redefined-outer-name
+        device_client: DeviceClient,                # pylint: disable=redefined-outer-name
+        device_service: DeviceService,              # pylint: disable=redefined-outer-name
+        p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
+
+    if not ENABLE_P4: pytest.skip(
+        'Skipping test: No P4 device has been configured')
+
+    device_p4_with_connect_rules = copy.deepcopy(DEVICE_P4)
+    device_p4_with_connect_rules['device_config']['config_rules'].extend(
+        DEVICE_P4_CONNECT_RULES)
+    device_client.AddDevice(Device(**device_p4_with_connect_rules))
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_P4_UUID)
+    assert driver is not None
+
+
+def test_device_p4_get(
+        context_client: ContextClient,              # pylint: disable=redefined-outer-name
+        device_client: DeviceClient,                # pylint: disable=redefined-outer-name
+        device_service: DeviceService,              # pylint: disable=redefined-outer-name
+        p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
+
+    if not ENABLE_P4: pytest.skip(
+        'Skipping test: No P4 device has been configured')
+
+    initial_config = device_client.GetInitialConfig(DeviceId(**DEVICE_P4_ID))
+    LOGGER.info('initial_config = {:s}'.format(
+        grpc_message_to_json_string(initial_config)))
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID))
+    LOGGER.info('device_data = {:s}'.format(
+        grpc_message_to_json_string(device_data)))
+
+
+def test_device_p4_configure(
+        context_client: ContextClient,              # pylint: disable=redefined-outer-name
+        device_client: DeviceClient,                # pylint: disable=redefined-outer-name
+        device_service: DeviceService,              # pylint: disable=redefined-outer-name
+        p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
+
+    if not ENABLE_P4: pytest.skip(
+        'Skipping test: No P4 device has been configured')
+
+    pytest.skip('Skipping test for unimplemented method')
+
+
+def test_device_p4_deconfigure(
+        context_client: ContextClient,              # pylint: disable=redefined-outer-name
+        device_client: DeviceClient,                # pylint: disable=redefined-outer-name
+        device_service: DeviceService,              # pylint: disable=redefined-outer-name
+        p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
+
+    if not ENABLE_P4: pytest.skip(
+        'Skipping test: No P4 device has been configured')
+
+    pytest.skip('Skipping test for unimplemented method')
+
+
+def test_device_p4_delete(
+        context_client: ContextClient,              # pylint: disable=redefined-outer-name
+        device_client: DeviceClient,                # pylint: disable=redefined-outer-name
+        device_service: DeviceService,              # pylint: disable=redefined-outer-name
+        p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
+
+    if not ENABLE_P4: pytest.skip('Skipping test: No P4 device has been configured')
+
+    device_client.DeleteDevice(DeviceId(**DEVICE_P4_ID))
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_P4_UUID)
+    assert driver is None
diff --git a/src/device/tests/test_unitary_tapi.py b/src/device/tests/test_unitary_tapi.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce01619ce6b8144ac81ddd73c700310e23c0b52e
--- /dev/null
+++ b/src/device/tests/test_unitary_tapi.py
@@ -0,0 +1,167 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, logging, pytest
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from context.proto.context_pb2 import DeviceId
+from device.client.DeviceClient import DeviceClient
+from device.proto.context_pb2 import ConfigActionEnum, Device
+from device.service.DeviceService import DeviceService
+from device.service.driver_api._Driver import _Driver
+from .PrepareTestScenario import ( # pylint: disable=unused-import
+    # be careful, order of symbols is important here!
+    mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment)
+
+try:
+    from .Device_Transport_Api_CTTC import (
+        DEVICE_TAPI, DEVICE_TAPI_CONNECT_RULES, DEVICE_TAPI_UUID, DEVICE_TAPI_ID, DEVICE_TAPI_CONFIG_RULES,
+        DEVICE_TAPI_DECONFIG_RULES)
+    ENABLE_TAPI = True
+except ImportError:
+    ENABLE_TAPI = False
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+# ----- Test Device Driver TAPI ------------------------------------------------
+
+def test_device_tapi_add_error_cases(
+    device_client : DeviceClient):      # pylint: disable=redefined-outer-name
+
+    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
+
+    with pytest.raises(grpc.RpcError) as e:
+        DEVICE_TAPI_WITH_EXTRA_RULES = copy.deepcopy(DEVICE_TAPI)
+        DEVICE_TAPI_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_TAPI_CONNECT_RULES)
+        DEVICE_TAPI_WITH_EXTRA_RULES['device_config']['config_rules'].extend(DEVICE_TAPI_CONFIG_RULES)
+        device_client.AddDevice(Device(**DEVICE_TAPI_WITH_EXTRA_RULES))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    msg_head = 'device.device_config.config_rules(['
+    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\
+               'with "_connect/" tag. Others should be configured after adding the device.'
+    except_msg = str(e.value.details())
+    assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
+
+
+def test_device_tapi_add_correct(
+    device_client: DeviceClient,        # pylint: disable=redefined-outer-name
+    device_service: DeviceService):     # pylint: disable=redefined-outer-name
+
+    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
+
+    DEVICE_TAPI_WITH_CONNECT_RULES = copy.deepcopy(DEVICE_TAPI)
+    DEVICE_TAPI_WITH_CONNECT_RULES['device_config']['config_rules'].extend(DEVICE_TAPI_CONNECT_RULES)
+    device_client.AddDevice(Device(**DEVICE_TAPI_WITH_CONNECT_RULES))
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver: _Driver = driver_instance_cache.get(DEVICE_TAPI_UUID)
+    assert driver is not None
+
+
+def test_device_tapi_get(
+    context_client: ContextClient,      # pylint: disable=redefined-outer-name
+    device_client: DeviceClient):       # pylint: disable=redefined-outer-name
+
+    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
+
+    initial_config = device_client.GetInitialConfig(DeviceId(**DEVICE_TAPI_ID))
+    LOGGER.info('initial_config = {:s}'.format(grpc_message_to_json_string(initial_config)))
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_TAPI_ID))
+    LOGGER.info('device_data = {:s}'.format(grpc_message_to_json_string(device_data)))
+
+
+def test_device_tapi_configure(
+    context_client: ContextClient,      # pylint: disable=redefined-outer-name
+    device_client: DeviceClient,        # pylint: disable=redefined-outer-name
+    device_service: DeviceService):     # pylint: disable=redefined-outer-name
+
+    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
+
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_TAPI_UUID)
+    assert driver is not None
+
+    # Requires retrieving data from the device; might be slow. Uncomment only when needed and the test does not pass otherwise.
+    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+
+    DEVICE_TAPI_WITH_CONFIG_RULES = copy.deepcopy(DEVICE_TAPI)
+    DEVICE_TAPI_WITH_CONFIG_RULES['device_config']['config_rules'].extend(DEVICE_TAPI_CONFIG_RULES)
+    device_client.ConfigureDevice(Device(**DEVICE_TAPI_WITH_CONFIG_RULES))
+
+    # Requires retrieving data from the device; might be slow. Uncomment only when needed and the test does not pass otherwise.
+    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_TAPI_ID))
+    config_rules = [
+        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
+        for config_rule in device_data.device_config.config_rules
+    ]
+    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
+        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
+    for config_rule in DEVICE_TAPI_CONFIG_RULES:
+        config_rule = (
+            ConfigActionEnum.Name(config_rule['action']), config_rule['resource_key'], config_rule['resource_value'])
+        assert config_rule in config_rules
+
+
+def test_device_tapi_deconfigure(
+    context_client: ContextClient,      # pylint: disable=redefined-outer-name
+    device_client: DeviceClient,        # pylint: disable=redefined-outer-name
+    device_service: DeviceService):     # pylint: disable=redefined-outer-name
+
+    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
+
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver: _Driver = driver_instance_cache.get(DEVICE_TAPI_UUID)
+    assert driver is not None
+
+    # Requires retrieving data from the device; might be slow. Uncomment only when needed and the test does not pass otherwise.
+    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+
+    DEVICE_TAPI_WITH_DECONFIG_RULES = copy.deepcopy(DEVICE_TAPI)
+    DEVICE_TAPI_WITH_DECONFIG_RULES['device_config']['config_rules'].extend(DEVICE_TAPI_DECONFIG_RULES)
+    device_client.ConfigureDevice(Device(**DEVICE_TAPI_WITH_DECONFIG_RULES))
+
+    # Requires retrieving data from the device; might be slow. Uncomment only when needed and the test does not pass otherwise.
+    #driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    #LOGGER.info('driver_config = {:s}'.format(str(driver_config)))
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_TAPI_ID))
+    config_rules = [
+        (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value)
+        for config_rule in device_data.device_config.config_rules
+    ]
+    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
+        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules])))
+    for config_rule in DEVICE_TAPI_DECONFIG_RULES:
+        action_set = ConfigActionEnum.Name(ConfigActionEnum.CONFIGACTION_SET)
+        config_rule = (action_set, config_rule['resource_key'], config_rule['resource_value'])
+        assert config_rule not in config_rules
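+    # The tuples are built with the SET action name on purpose: after applying the
+    # deconfig rules (presumably carrying the DELETE action), the previously SET
+    # rules must no longer appear in the device config.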
+
+
+def test_device_tapi_delete(
+    device_client : DeviceClient,       # pylint: disable=redefined-outer-name
+    device_service : DeviceService):    # pylint: disable=redefined-outer-name
+
+    if not ENABLE_TAPI: pytest.skip('Skipping test: No TAPI device has been configured')
+
+    device_client.DeleteDevice(DeviceId(**DEVICE_TAPI_ID))
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver : _Driver = driver_instance_cache.get(DEVICE_TAPI_UUID, {})
+    assert driver is None
diff --git a/src/interdomain/client/InterdomainClient.py b/src/interdomain/client/InterdomainClient.py
index 345dfa3ec8b6edd8ca3c4c840e2368ec5d3faf12..a34f31537e2dab3747c5499c640b0b73a4392cf9 100644
--- a/src/interdomain/client/InterdomainClient.py
+++ b/src/interdomain/client/InterdomainClient.py
@@ -13,6 +13,8 @@
 # limitations under the License.
 
 import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
 from common.tools.client.RetryDecorator import retry, delay_exponential
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from interdomain.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatus, TeraFlowController
@@ -24,8 +26,10 @@ DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
 RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
 
 class InterdomainClient:
-    def __init__(self, address, port):
-        self.endpoint = '{:s}:{:s}'.format(str(address), str(port))
+    def __init__(self, host=None, port=None):
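+        # When host/port are not given, they are resolved from the environment via
+        # get_service_host/get_service_port_grpc.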
+        if not host: host = get_service_host(ServiceNameEnum.INTERDOMAIN)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
         LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint))
         self.channel = None
         self.stub = None
diff --git a/src/interdomain/service/InterdomainService.py b/src/interdomain/service/InterdomainService.py
index debc943cf17ef2444b231d9a7f10338e5bc5f5b6..cca6bcb85869b53d644e510e8581de20f1e9c825 100644
--- a/src/interdomain/service/InterdomainService.py
+++ b/src/interdomain/service/InterdomainService.py
@@ -12,65 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging
-from concurrent import futures
-from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
-from grpc_health.v1.health_pb2 import HealthCheckResponse
-from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
-from context.client.ContextClient import ContextClient
-from interdomain.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
 from interdomain.proto.interdomain_pb2_grpc import add_InterdomainServiceServicer_to_server
-from slice.client.SliceClient import SliceClient
 from .InterdomainServiceServicerImpl import InterdomainServiceServicerImpl
 from .RemoteDomainClients import RemoteDomainClients
 
-BIND_ADDRESS = '0.0.0.0'
-LOGGER = logging.getLogger(__name__)
+class InterdomainService(GenericGrpcService):
+    def __init__(self, remote_domain_clients : RemoteDomainClients, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN)
+        super().__init__(port, cls_name=cls_name)
+        self.interdomain_servicer = InterdomainServiceServicerImpl(remote_domain_clients)
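+        # install_servicers() below is expected to be called back by the base class
+        # when the server starts (assumed behavior of GenericGrpcService).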
 
-class InterdomainService:
-    def __init__(
-        self, context_client : ContextClient, slice_client : SliceClient, remote_domain_clients : RemoteDomainClients,
-        address=BIND_ADDRESS, port=GRPC_SERVICE_PORT, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD
-    ):
-        self.context_client = context_client
-        self.slice_client = slice_client
-        self.remote_domain_clients = remote_domain_clients
-        self.address = address
-        self.port = port
-        self.endpoint = None
-        self.max_workers = max_workers
-        self.grace_period = grace_period
-        self.interdomain_servicer = None
-        self.health_servicer = None
-        self.pool = None
-        self.server = None
-
-    def start(self):
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(self.port))
-        LOGGER.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
-            str(self.endpoint), str(self.max_workers)))
-
-        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
-        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))
-
-        self.interdomain_servicer = InterdomainServiceServicerImpl(
-            self.context_client, self.slice_client, self.remote_domain_clients)
+    def install_servicers(self):
         add_InterdomainServiceServicer_to_server(self.interdomain_servicer, self.server)
-
-        self.health_servicer = HealthServicer(
-            experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
-        add_HealthServicer_to_server(self.health_servicer, self.server)
-
-        port = self.server.add_insecure_port(self.endpoint)
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(port))
-        LOGGER.info('Listening on {:s}...'.format(str(self.endpoint)))
-        self.server.start()
-        self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) # pylint: disable=maybe-no-member
-
-        LOGGER.debug('Service started')
-
-    def stop(self):
-        LOGGER.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period)))
-        self.health_servicer.enter_graceful_shutdown()
-        self.server.stop(self.grace_period)
-        LOGGER.debug('Service stopped')
diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py
index e76297625e344f3dd46fbdd951e9705e7b171d36..20ae74eef816fdb3fbf7352913673cb51f222ba8 100644
--- a/src/interdomain/service/InterdomainServiceServicerImpl.py
+++ b/src/interdomain/service/InterdomainServiceServicerImpl.py
@@ -14,7 +14,7 @@
 
 import grpc, logging
 from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
-from common.tools.grpc.Tools import grpc_message_to_json_string
+#from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from context.proto.context_pb2 import SliceStatusEnum
 from interdomain.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatus, TeraFlowController
@@ -29,18 +29,16 @@ METHOD_NAMES = ['RequestSlice', 'Authenticate', 'LookUpSlice', 'OrderSliceFromCa
 METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
 
 class InterdomainServiceServicerImpl(InterdomainServiceServicer):
-    def __init__(
-        self, context_client : ContextClient, slice_client : SliceClient,
-        remote_domain_clients : RemoteDomainClients
-    ):
+    def __init__(self, remote_domain_clients : RemoteDomainClients):
         LOGGER.debug('Creating Servicer...')
-        self.context_client = context_client
-        self.slice_client = slice_client
         self.remote_domain_clients = remote_domain_clients
         LOGGER.debug('Servicer Created')
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def RequestSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
+        context_client = ContextClient()
+        slice_client = SliceClient()
+
         domains_to_endpoints = {}
         local_domain_uuid = None
         for slice_endpoint_id in request.slice_endpoint_ids:
@@ -90,7 +88,7 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
                 if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE:
                     raise Exception('Remote Slice creation failed. Wrong Slice status returned')
 
-            #self.context_client.SetSlice(remote_slice)
+            #context_client.SetSlice(remote_slice)
             #subslice_id = reply.slice_subslice_ids.add()
             #subslice_id.CopyFrom(remote_slice.slice_id)
 
@@ -112,7 +110,7 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
             slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2'
             slice_endpoint_id.endpoint_uuid.uuid = '2/1'
 
-        local_slice_reply = self.slice_client.CreateSlice(local_slice_request)
+        local_slice_reply = slice_client.CreateSlice(local_slice_request)
         if local_slice_reply != local_slice_request.slice_id: # pylint: disable=no-member
             raise Exception('Local Slice creation failed. Wrong Slice Id was returned')
 
@@ -120,7 +118,7 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
         subslice_id.context_id.context_uuid.uuid = local_slice_request.slice_id.context_id.context_uuid.uuid
         subslice_id.slice_uuid.uuid = local_slice_request.slice_id.slice_uuid.uuid
 
-        self.context_client.SetSlice(reply)
+        context_client.SetSlice(reply)
         return reply.slice_id
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
@@ -133,7 +131,8 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def LookUpSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
         try:
-            slice_ = self.context_client.GetSlice(request.slice_id)
+            context_client = ContextClient()
+            slice_ = context_client.GetSlice(request.slice_id)
             return slice_.slice_id
         except grpc.RpcError:
             #LOGGER.exception('Unable to get slice({:s})'.format(grpc_message_to_json_string(request.slice_id)))
@@ -146,7 +145,9 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def CreateSliceAndAddToCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice:
-        reply = self.slice_client.CreateSlice(request)
+        context_client = ContextClient()
+        slice_client = SliceClient()
+        reply = slice_client.CreateSlice(request)
         if reply != request.slice_id: # pylint: disable=no-member
             raise Exception('Slice creation failed. Wrong Slice Id was returned')
-        return self.context_client.GetSlice(request.slice_id)
+        return context_client.GetSlice(request.slice_id)
diff --git a/src/interdomain/service/RemoteDomainClients.py b/src/interdomain/service/RemoteDomainClients.py
index 709aa3c07e545ab5babf8d7e051fa7120fdb7a99..8fde3f4422b2febc7374f08536c0015bfd4719e4 100644
--- a/src/interdomain/service/RemoteDomainClients.py
+++ b/src/interdomain/service/RemoteDomainClients.py
@@ -26,16 +26,16 @@ class RemoteDomainClients:
         self.peer_domain = {}
 
     def add_peer(
-            self, domain_name : str, address : str, port : int, context_uuid : str = DEFAULT_CONTEXT_UUID
+            self, domain_name : str, host : str, port : int, context_uuid : str = DEFAULT_CONTEXT_UUID
         ) -> None:
         while True:
             try:
-                remote_teraflow_ip = socket.gethostbyname(address)
+                remote_teraflow_ip = socket.gethostbyname(host)
                 if len(remote_teraflow_ip) > 0: break
             except socket.gaierror as e:
                 if str(e) == '[Errno -2] Name or service not known': continue
 
-        interdomain_client = InterdomainClient(address, port)
+        interdomain_client = InterdomainClient(host=host, port=port)
         request = TeraFlowController()
         request.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID # pylint: disable=no-member
         request.ip_address = get_setting('INTERDOMAINSERVICE_SERVICE_HOST', default='0.0.0.0')
diff --git a/src/interdomain/service/__main__.py b/src/interdomain/service/__main__.py
index ff19271ee4fee7fb975294e11b21de18060607f4..c0a078f4ded85ab957011d21d56c97c8d303dc2a 100644
--- a/src/interdomain/service/__main__.py
+++ b/src/interdomain/service/__main__.py
@@ -14,14 +14,12 @@
 
 import logging, signal, sys, threading
 from prometheus_client import start_http_server
-from common.Settings import get_setting, wait_for_environment_variables
-from context.client.ContextClient import ContextClient
-from interdomain.service.RemoteDomainClients import RemoteDomainClients
-from interdomain.Config import (
-    CONTEXT_SERVICE_HOST, CONTEXT_SERVICE_PORT, SLICE_SERVICE_HOST, SLICE_SERVICE_PORT, GRPC_SERVICE_PORT,
-    GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD, LOG_LEVEL, METRICS_PORT)
-from slice.client.SliceClient import SliceClient
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+    get_service_port_grpc, wait_for_environment_variables)
 from .InterdomainService import InterdomainService
+from .RemoteDomainClients import RemoteDomainClients
 
 terminate = threading.Event()
 LOGGER : logging.Logger = None
@@ -33,55 +31,36 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
 def main():
     global LOGGER # pylint: disable=global-statement
 
-    grpc_service_port       = get_setting('INTERDOMAINSERVICE_SERVICE_PORT_GRPC', default=GRPC_SERVICE_PORT      )
-    max_workers             = get_setting('MAX_WORKERS',                          default=GRPC_MAX_WORKERS       )
-    grace_period            = get_setting('GRACE_PERIOD',                         default=GRPC_GRACE_PERIOD      )
-    log_level               = get_setting('LOG_LEVEL',                            default=LOG_LEVEL              )
-    metrics_port            = get_setting('METRICS_PORT',                         default=METRICS_PORT           )
-
+    log_level = get_log_level()
     logging.basicConfig(level=log_level)
     LOGGER = logging.getLogger(__name__)
 
     wait_for_environment_variables([
-        'CONTEXTSERVICE_SERVICE_HOST', 'CONTEXTSERVICE_SERVICE_PORT_GRPC',
-        'SLICESERVICE_SERVICE_HOST', 'SLICESERVICE_SERVICE_PORT_GRPC',
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
-    context_service_host    = get_setting('CONTEXTSERVICE_SERVICE_HOST',          default=CONTEXT_SERVICE_HOST   )
-    context_service_port    = get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC',     default=CONTEXT_SERVICE_PORT   )
-    slice_service_host      = get_setting('SLICESERVICE_SERVICE_HOST',            default=SLICE_SERVICE_HOST     )
-    slice_service_port      = get_setting('SLICESERVICE_SERVICE_PORT_GRPC',       default=SLICE_SERVICE_PORT     )
-
     signal.signal(signal.SIGINT,  signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
     LOGGER.info('Starting...')
 
     # Start metrics server
+    metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
-    # Initialize Context Client
-    if context_service_host is None or context_service_port is None:
-        raise Exception('Wrong address({:s}):port({:s}) of Context component'.format(
-            str(context_service_host), str(context_service_port)))
-    context_client = ContextClient(context_service_host, context_service_port)
-
-    # Initialize Slice Client
-    if slice_service_host is None or slice_service_port is None:
-        raise Exception('Wrong address({:s}):port({:s}) of Slice component'.format(
-            str(slice_service_host), str(slice_service_port)))
-    slice_client = SliceClient(slice_service_host, slice_service_port)
-
     # Define remote domain clients
     remote_domain_clients = RemoteDomainClients()
 
     # Starting Interdomain service
-    grpc_service = InterdomainService(
-        context_client, slice_client, remote_domain_clients, port=grpc_service_port, max_workers=max_workers,
-        grace_period=grace_period)
+    grpc_service = InterdomainService(remote_domain_clients)
     grpc_service.start()
 
-    remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', GRPC_SERVICE_PORT)
+    # TODO: define the remote peers through configuration instead of hard-coding them
+    interdomain_service_port_grpc = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN)
+    remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', interdomain_service_port_grpc)
 
     # Wait for Ctrl+C or termination signal
     while not terminate.wait(timeout=0.1): pass
diff --git a/src/interdomain/tests/test_unitary.py b/src/interdomain/tests/test_unitary.py
index bcc6bb9c9cae702f69b0c8f62e483f57d396fb6e..7fe1acc7cec5cbf3663dc7db68c45a22b56d3a6d 100644
--- a/src/interdomain/tests/test_unitary.py
+++ b/src/interdomain/tests/test_unitary.py
@@ -13,134 +13,131 @@
 # limitations under the License.
 
 
-import logging, grpc
-import os
-import sqlite3
-
-import pytest
-from typing import Tuple
-
-from interdomain.proto import context_pb2, kpi_sample_types_pb2, monitoring_pb2
-from interdomain.client.interdomain_client import InterdomainClient
-from interdomain.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
-from interdomain.service.InterdomainService import InterdomainService
-
-from common.orm.Database import Database
-from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum
-from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
-from common.message_broker.MessageBroker import MessageBroker
-
-LOGGER = logging.getLogger(__name__)
-LOGGER.setLevel(logging.DEBUG)
-
-###########################
-# Tests Setup
-###########################
-
-SERVER_ADDRESS = '127.0.0.1'
-LISTEN_ADDRESS = '[::]'
-GRPC_PORT_MONITORING = 9090
-
-GRPC_PORT_CONTEXT    = 10000 + grpc_port_context    # avoid privileged ports
-
-SCENARIOS = [ # comment/uncomment scenarios to activate/deactivate them in the test unit
-    ('all_inmemory', DatabaseBackendEnum.INMEMORY, {},           MessageBrokerBackendEnum.INMEMORY, {}          ),
-]
-
-
-# This fixture will be requested by test cases and last during testing session
-@pytest.fixture(scope='session')
-def interdomain_service():
-    LOGGER.warning('interdomain_service begin')
-
-    interdomain_port    = GRPC_INTERDOMAIN_PORT
-    max_workers     = GRPC_MAX_WORKERS
-    grace_period    = GRPC_GRACE_PERIOD
-
-    LOGGER.info('Initializing InterdomainService...')
-    grpc_service = InterdomainService(port=interdomain_port, max_workers=max_workers, grace_period=grace_period)
-    server = grpc_service.start()
-
-    # yield the server, when test finishes, execution will resume to stop it
-    LOGGER.warning('interdomain_service yielding')
-    yield server
-
-    LOGGER.info('Terminating InterdomainService...')
-    grpc_service.stop()
-
-# This fixture will be requested by test cases and last during testing session.
-# The client requires the server, so client fixture has the server as dependency.
-@pytest.fixture(scope='session')
-def interdomain_client(interdomain_service):
-    LOGGER.warning('interdomain_client begin')
-    client = InterdomainClient(server=SERVER_ADDRESS, port=GRPC_PORT_INTERDOMAIN)  # instantiate the client
-    LOGGER.warning('interdomain_client returning')
-    return client
-
-# This fixture will be requested by test cases and last during testing session.
-@pytest.fixture(scope='session')
-def create_TeraFlowController():
-    LOGGER.warning('create_TeraFlowController begin')
-    # form request
-    tf_ctl                  = context_pb2.TeraFlowController()
-    tf_ctl.context_id       = context_pb2.ContextId()
-    tf_ctl.context_id.context_uuid = context_pb2.Uuid()
-    tf_ctl.context_id.context_uuid.uuid = str(1) 
-    tf_ctl.ip_address       = "127.0.0.1"
-    tf_ctl.port      	    = 9090
-    return tf_ctl
-
-@pytest.fixture(scope='session')
-def create_TransportSlice():
-    LOGGER.warning('create_TransportSlice begin')
-
-    # form request
-    slice_req              = slice_pb2.TransportSlice()
-    slice_req.contextId    = context_pb2.ContextId()
-    slice_req.contextId.context_uuid = context_pb2.Uuid()
-    slice_req.contextId.context_uuid.uuid = str(1) 
-    slice_req.slice_id     = context_pb2.Uuid()
-    slice_req.slice_id.context_uuid.uuid = str(1) 
-
-    return slice_req
-
-
-###########################
-# Tests Implementation
-###########################
-
-
-# Test case that makes use of client fixture to test server's CreateKpi method
-def test_Authenticate(interdomain_client,create_TeraFlowController):
-    # make call to server
-    LOGGER.warning('test_Authenticate requesting')
-    response = interdomain_client.Authenticate(create_TeraFlowController)
-    LOGGER.debug(str(response))
-    assert isinstance(response, context.AuthenticationResult)
-
-# Test case that makes use of client fixture to test server's MonitorKpi method
-def test_LookUpSlice(interdomain_client,create_TransportSlice):
-    LOGGER.warning('test_LookUpSlice begin')
-
-    response = interdomain_client.LookUpSlice(create_TransportSlice)
-    LOGGER.debug(str(response))
-    assert isinstance(response, slice.SliceId)
-
-# Test case that makes use of client fixture to test server's GetStreamKpi method
-def test_CreateSliceAndAddToCatalog(interdomain_client,create_TransportSlice):
-    LOGGER.warning('test_CreateSliceAndAddToCatalog begin')
-    response = interdomain_client.CreateSliceAndAddToCatalog(create_TransportSlice)
-    LOGGER.debug(str(response))
-    assert isinstance(response, slice.SliceId)
-
-# Test case that makes use of client fixture to test server's IncludeKpi method
-def test_OrderSliceFromCatalog(interdomain_client,create_TransportSlice):
-    # make call to server
-    LOGGER.warning('test_OrderSliceFromCatalog requesting')
-    response = interdomain_client.OrderSliceFromCatalog(create_TransportSlice)
-    LOGGER.debug(str(response))
-    assert isinstance(response, slice.SliceId)
-
-
-
-
+#import logging, grpc
+#import os
+#import sqlite3
+#
+#import pytest
+#from typing import Tuple
+#
+#from interdomain.proto import context_pb2, kpi_sample_types_pb2, monitoring_pb2
+#from interdomain.client.interdomain_client import InterdomainClient
+#from interdomain.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
+#from interdomain.service.InterdomainService import InterdomainService
+#
+#from common.orm.Database import Database
+#from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum
+#from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
+#from common.message_broker.MessageBroker import MessageBroker
+#
+#LOGGER = logging.getLogger(__name__)
+#LOGGER.setLevel(logging.DEBUG)
+#
+############################
+## Tests Setup
+############################
+#
+#SERVER_ADDRESS = '127.0.0.1'
+#LISTEN_ADDRESS = '[::]'
+#GRPC_PORT_MONITORING = 9090
+#
+#GRPC_PORT_CONTEXT    = 10000 + grpc_port_context    # avoid privileged ports
+#
+#SCENARIOS = [ # comment/uncomment scenarios to activate/deactivate them in the test unit
+#    ('all_inmemory', DatabaseBackendEnum.INMEMORY, {},           MessageBrokerBackendEnum.INMEMORY, {}          ),
+#]
+#
+#
+## This fixture will be requested by test cases and last during testing session
+#@pytest.fixture(scope='session')
+#def interdomain_service():
+#    LOGGER.warning('interdomain_service begin')
+#
+#    interdomain_port    = GRPC_INTERDOMAIN_PORT
+#    max_workers     = GRPC_MAX_WORKERS
+#    grace_period    = GRPC_GRACE_PERIOD
+#
+#    LOGGER.info('Initializing InterdomainService...')
+#    grpc_service = InterdomainService(port=interdomain_port, max_workers=max_workers, grace_period=grace_period)
+#    server = grpc_service.start()
+#
+#    # yield the server, when test finishes, execution will resume to stop it
+#    LOGGER.warning('interdomain_service yielding')
+#    yield server
+#
+#    LOGGER.info('Terminating InterdomainService...')
+#    grpc_service.stop()
+#
+## This fixture will be requested by test cases and last during testing session.
+## The client requires the server, so client fixture has the server as dependency.
+#@pytest.fixture(scope='session')
+#def interdomain_client(interdomain_service):
+#    LOGGER.warning('interdomain_client begin')
+#    client = InterdomainClient(server=SERVER_ADDRESS, port=GRPC_PORT_INTERDOMAIN)  # instantiate the client
+#    LOGGER.warning('interdomain_client returning')
+#    return client
+#
+## This fixture will be requested by test cases and last during testing session.
+#@pytest.fixture(scope='session')
+#def create_TeraFlowController():
+#    LOGGER.warning('create_TeraFlowController begin')
+#    # form request
+#    tf_ctl                  = context_pb2.TeraFlowController()
+#    tf_ctl.context_id       = context_pb2.ContextId()
+#    tf_ctl.context_id.context_uuid = context_pb2.Uuid()
+#    tf_ctl.context_id.context_uuid.uuid = str(1) 
+#    tf_ctl.ip_address       = "127.0.0.1"
+#    tf_ctl.port      	    = 9090
+#    return tf_ctl
+#
+#@pytest.fixture(scope='session')
+#def create_TransportSlice():
+#    LOGGER.warning('create_TransportSlice begin')
+#
+#    # form request
+#    slice_req              = slice_pb2.TransportSlice()
+#    slice_req.contextId    = context_pb2.ContextId()
+#    slice_req.contextId.context_uuid = context_pb2.Uuid()
+#    slice_req.contextId.context_uuid.uuid = str(1) 
+#    slice_req.slice_id     = context_pb2.Uuid()
+#    slice_req.slice_id.context_uuid.uuid = str(1) 
+#
+#    return slice_req
+#
+#
+############################
+## Tests Implementation
+############################
+#
+#
+## Test case that makes use of client fixture to test server's CreateKpi method
+#def test_Authenticate(interdomain_client,create_TeraFlowController):
+#    # make call to server
+#    LOGGER.warning('test_Authenticate requesting')
+#    response = interdomain_client.Authenticate(create_TeraFlowController)
+#    LOGGER.debug(str(response))
+#    assert isinstance(response, context.AuthenticationResult)
+#
+## Test case that makes use of client fixture to test server's MonitorKpi method
+#def test_LookUpSlice(interdomain_client,create_TransportSlice):
+#    LOGGER.warning('test_LookUpSlice begin')
+#
+#    response = interdomain_client.LookUpSlice(create_TransportSlice)
+#    LOGGER.debug(str(response))
+#    assert isinstance(response, slice.SliceId)
+#
+## Test case that makes use of client fixture to test server's GetStreamKpi method
+#def test_CreateSliceAndAddToCatalog(interdomain_client,create_TransportSlice):
+#    LOGGER.warning('test_CreateSliceAndAddToCatalog begin')
+#    response = interdomain_client.CreateSliceAndAddToCatalog(create_TransportSlice)
+#    LOGGER.debug(str(response))
+#    assert isinstance(response, slice.SliceId)
+#
+## Test case that makes use of client fixture to test server's IncludeKpi method
+#def test_OrderSliceFromCatalog(interdomain_client,create_TransportSlice):
+#    # make call to server
+#    LOGGER.warning('test_OrderSliceFromCatalog requesting')
+#    response = interdomain_client.OrderSliceFromCatalog(create_TransportSlice)
+#    LOGGER.debug(str(response))
+#    assert isinstance(response, slice.SliceId)
+#
diff --git a/src/monitoring/Dockerfile b/src/monitoring/Dockerfile
index c1bba549eaaf5cfaaacd16d01a9d2204bc09c393..ca18929f18816176a503126f43514b7ef32a3a12 100644
--- a/src/monitoring/Dockerfile
+++ b/src/monitoring/Dockerfile
@@ -39,8 +39,6 @@ RUN mkdir -p /var/teraflow/common
 RUN mkdir -p /var/teraflow/common/tools
 RUN mkdir -p /var/teraflow/common/rpc_method_wrapper
 RUN mkdir -p /var/teraflow/device
-RUN mkdir -p /var/teraflow/device/proto
-RUN mkdir -p /var/teraflow/device/client
 RUN mkdir -p /var/teraflow/context
 
 # Get Python packages per module
@@ -50,11 +48,9 @@ RUN python3 -m pip install -r requirements.txt
 
 # add files into working directory
 COPY monitoring/. monitoring
-COPY device/proto/. device/proto
-COPY device/client/. device/client
-COPY device/Config.py device
 COPY common/. common
 COPY context/. context
+COPY device/. device
 
 RUN rm -r common/message_broker/tests
 RUN rm -r common/orm/tests
diff --git a/src/monitoring/client/MonitoringClient.py b/src/monitoring/client/MonitoringClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8b39b8bf8d0ae84da19fa651da00633486e6bc6
--- /dev/null
+++ b/src/monitoring/client/MonitoringClient.py
@@ -0,0 +1,99 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from typing import Iterator
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from monitoring.proto.context_pb2 import Empty
+from monitoring.proto.monitoring_pb2 import Kpi, KpiDescriptor, KpiId, MonitorKpiRequest
+from monitoring.proto.monitoring_pb2_grpc import MonitoringServiceStub
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 15
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class MonitoringClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.MONITORING)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.MONITORING)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = MonitoringServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def CreateKpi(self, request : KpiDescriptor) -> KpiId:
+        LOGGER.debug('CreateKpi: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.CreateKpi(request)
+        LOGGER.debug('CreateKpi result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def GetKpiDescriptor(self, request : KpiId) -> KpiDescriptor:
+        LOGGER.debug('GetKpiDescriptor: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetKpiDescriptor(request)
+        LOGGER.debug('GetKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def IncludeKpi(self, request : Kpi) -> Empty:
+        LOGGER.debug('IncludeKpi: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.IncludeKpi(request)
+        LOGGER.debug('IncludeKpi result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def MonitorKpi(self, request : MonitorKpiRequest) -> Empty:
+        LOGGER.debug('MonitorKpi: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.MonitorKpi(request)
+        LOGGER.debug('MonitorKpi result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def GetStreamKpi(self, request : KpiId) -> Iterator[Kpi]:
+        LOGGER.debug('GetStreamKpi: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetStreamKpi(request)
+        LOGGER.debug('GetStreamKpi result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def GetInstantKpi(self, request : KpiId) -> Kpi:
+        LOGGER.debug('GetInstantKpi: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetInstantKpi(request)
+        LOGGER.debug('GetInstantKpi result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+
+if __name__ == '__main__':
+    import sys
+    # get port
+    _port = sys.argv[1] if len(sys.argv) > 1 else '7070'
+
+    # make call to server
+    client = MonitoringClient(port=_port)
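+
+    # Hypothetical usage sketch (message fields assumed from monitoring.proto):
+    #   descriptor = KpiDescriptor()
+    #   descriptor.kpi_description = 'example KPI'
+    #   kpi_id = client.CreateKpi(descriptor)
+    #   client.GetKpiDescriptor(kpi_id)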
diff --git a/src/monitoring/client/monitoring_client.py b/src/monitoring/client/monitoring_client.py
deleted file mode 100644
index 62bfb519e7649427cad5b8f9e3bc0f849a9b9a39..0000000000000000000000000000000000000000
--- a/src/monitoring/client/monitoring_client.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import grpc
-
-from monitoring.proto import monitoring_pb2
-from monitoring.proto import monitoring_pb2_grpc
-from monitoring.proto import context_pb2
-
-from common.logger import getJSONLogger
-LOGGER = getJSONLogger('monitoring-client')
-LOGGER.setLevel('DEBUG')
-
-class MonitoringClient:
-
-    def __init__(self, server='monitoring', port='7070'):
-        endpoint = '{}:{}'.format(server, port)
-        LOGGER.info('init monitoringClient {}'.format(endpoint))
-        self.channel = grpc.insecure_channel(endpoint)
-        self.server = monitoring_pb2_grpc.MonitoringServiceStub(self.channel)
-
-    def CreateKpi(self, request):
-        LOGGER.info('CreateKpi: {}'.format(request))
-        response = self.server.CreateKpi(request)
-        LOGGER.info('CreateKpi result: {}'.format(response))
-        return response
-
-    def MonitorKpi(self, request):
-        LOGGER.info('MonitorKpi: {}'.format(request))
-        response = self.server.MonitorKpi(request)
-        LOGGER.info('MonitorKpi result: {}'.format(response))
-        return response
-
-    def IncludeKpi(self, request):
-        LOGGER.info('IncludeKpi: {}'.format(request))
-        response = self.server.IncludeKpi(request)
-        LOGGER.info('IncludeKpi result: {}'.format(response))
-        return response
-
-    def GetStreamKpi(self, request):
-        LOGGER.info('GetStreamKpi: {}'.format(request))
-        response = self.server.GetStreamKpi(request)
-        LOGGER.info('GetStreamKpi result: {}'.format(response))
-        yield monitoring_pb2.Kpi()
-
-    def GetInstantKpi(self, request):
-        LOGGER.info('GetInstantKpi: {}'.format(request))
-        response = self.server.GetInstantKpi(request)
-        LOGGER.info('GetInstantKpi result: {}'.format(response))
-        return monitoring_pb2.Kpi()
-
-    def GetKpiDescriptor(self, request):
-        LOGGER.info('GetKpiDescriptor: {}'.format(request))
-        response = self.server.GetKpiDescriptor(request)
-        LOGGER.info('GetKpiDescriptor result: {}'.format(response))
-        return response
-
-if __name__ == '__main__':
-    # get port
-    port = sys.argv[1] if len(sys.argv) > 1 else '7070'
-
-    # make call to server
-    client = MonitoringClient(port=port)
diff --git a/src/monitoring/requirements.in b/src/monitoring/requirements.in
index 839ea33af0fbd448f9a5c4d6cb053e71bd420468..4d4f057bdc6b66d699ef1e723b64c05d60f5cc0e 100644
--- a/src/monitoring/requirements.in
+++ b/src/monitoring/requirements.in
@@ -1,17 +1,25 @@
-#google-api-core
+anytree==2.8.0
+APScheduler==3.8.1
+fastcache==1.1.0
 grpcio==1.43.0
 grpcio-health-checking==1.43.0
+#google-api-core
 #opencensus[stackdriver]
-python-json-logger
 #google-cloud-profiler
 #numpy
+Jinja2==3.0.3
+ncclient==0.6.13
+p4runtime==1.3.0
+paramiko==2.9.2
 prometheus-client==0.13.0
 protobuf==3.19.3
 pytest==6.2.5
 pytest-benchmark==3.4.1
 influxdb
+python-dateutil==2.8.2
+python-json-logger==2.0.2
+pytz==2021.3
 redis==4.1.2
-#anytree==2.8.0
-#APScheduler==3.8.1
-#xmltodict==0.12.0
+requests==2.27.1
+xmltodict==0.12.0
 coverage==6.3
diff --git a/src/monitoring/service/EventTools.py b/src/monitoring/service/EventTools.py
index 636556425af9ac02487386d81b9d8d4e786aa560..04c06e74203efcd3fab7566b2687dcdfc7e62658 100644
--- a/src/monitoring/service/EventTools.py
+++ b/src/monitoring/service/EventTools.py
@@ -19,26 +19,27 @@ import grpc
 
 from common.rpc_method_wrapper.ServiceExceptions import ServiceException
 from context.client.ContextClient import ContextClient
-from context.proto import kpi_sample_types_pb2
+#from context.proto import kpi_sample_types_pb2
 from context.proto.context_pb2 import Empty, EventTypeEnum
 
 from common.logger import getJSONLogger
-from monitoring.client.monitoring_client import MonitoringClient
+from monitoring.client.MonitoringClient import MonitoringClient
 from monitoring.proto import monitoring_pb2
 
 LOGGER = getJSONLogger('monitoringservice-server')
 LOGGER.setLevel('DEBUG')
 
 class EventsDeviceCollector:
-    def __init__(self, context_client_grpc : ContextClient, monitoring_client_grpc : MonitoringClient) -> None: # pylint: disable=redefined-outer-name
+    def __init__(self) -> None: # pylint: disable=redefined-outer-name
         self._events_queue = Queue()
 
-        self._device_stream     = context_client_grpc.GetDeviceEvents(Empty())
-        self._context_client    = context_client_grpc
-        self._channel           = context_client_grpc.channel
-        self._monitoring_client = monitoring_client_grpc
+        self._context_client_grpc = ContextClient()
+        self._device_stream     = self._context_client_grpc.GetDeviceEvents(Empty())
+        self._context_client    = self._context_client_grpc
+        self._channel           = self._context_client_grpc.channel
+        self._monitoring_client = MonitoringClient(host='127.0.0.1')
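+        # The context and monitoring clients are created internally rather than
+        # injected; the monitoring endpoint is assumed to be reachable on the
+        # local host (127.0.0.1).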
 
-        self._device_thread   = threading.Thread(target=self._collect, args=(self._device_stream  ,), daemon=False)
+        self._device_thread   = threading.Thread(target=self._collect, args=(self._device_stream,), daemon=False)
 
     def grpc_server_on(self):
         try:
diff --git a/src/monitoring/service/MonitoringService.py b/src/monitoring/service/MonitoringService.py
index f1ecba3664dfee74fddb7093ff352724791b4f7d..0736eba435820344750225c28e5b1348c7f7dfbc 100644
--- a/src/monitoring/service/MonitoringService.py
+++ b/src/monitoring/service/MonitoringService.py
@@ -12,63 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from concurrent import futures
-
-import grpc, logging
-
-from monitoring.service.MonitoringServiceServicerImpl import MonitoringServiceServicerImpl
-from monitoring.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
-from monitoring.proto.monitoring_pb2_grpc import  add_MonitoringServiceServicer_to_server
-
-from grpc_health.v1 import health
-from grpc_health.v1 import health_pb2
-from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
-
-from common.logger import getJSONLogger
-LOGGER = getJSONLogger('monitoring-server')
-
-BIND_ADDRESS = '0.0.0.0'
-
-class MonitoringService:
-    def __init__(self, address=BIND_ADDRESS, port=GRPC_SERVICE_PORT, max_workers=GRPC_MAX_WORKERS,
-                 grace_period=GRPC_GRACE_PERIOD):
-        self.address = address
-        self.port = port
-        self.endpoint = None
-        self.max_workers = max_workers
-        self.grace_period = grace_period
-        self.monitoring_servicer = None
-        self.health_servicer = None
-        self.pool = None
-        self.server = None
-
-    def start(self):
-        # create gRPC server
-        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=self.max_workers)) # ,interceptors=(tracer_interceptor,))
-
-        # add monitoring servicer class to gRPC server
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from monitoring.proto.monitoring_pb2_grpc import add_MonitoringServiceServicer_to_server
+from .MonitoringServiceServicerImpl import MonitoringServiceServicerImpl
+
+class MonitoringService(GenericGrpcService):
+    def __init__(self, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.MONITORING)
+        super().__init__(port, cls_name=cls_name)
         self.monitoring_servicer = MonitoringServiceServicerImpl()
-        add_MonitoringServiceServicer_to_server(self.monitoring_servicer, self.server)
-
-        # add gRPC health checker servicer class to gRPC server
-        self.health_servicer = health.HealthServicer(
-            experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
-        add_HealthServicer_to_server(self.health_servicer, self.server)
-
-        # start server
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(self.port))
-        LOGGER.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
-            str(self.endpoint), str(self.max_workers)))
-
-        self.server.add_insecure_port(self.endpoint)
-        self.server.start()
-        self.health_servicer.set('', health_pb2.HealthCheckResponse.SERVING) # pylint: disable=maybe-no-member
-
-        LOGGER.debug('Service started')
-
-    def stop(self):
-        LOGGER.debug('Stopping service (grace period {} seconds)...'.format(self.grace_period))
-        self.health_servicer.enter_graceful_shutdown()
-        self.server.stop(self.grace_period)
-        LOGGER.debug('Service stopped')
 
+    def install_servicers(self):
+        add_MonitoringServiceServicer_to_server(self.monitoring_servicer, self.server)
diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py
index 88cd2d3a83357dec5c49e1894ed4243ceb1b4b6e..28c1ed12045a8d6dfbd9669e8ce2f081248cabd4 100644
--- a/src/monitoring/service/MonitoringServiceServicerImpl.py
+++ b/src/monitoring/service/MonitoringServiceServicerImpl.py
@@ -12,29 +12,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os,grpc, logging
-import socket
-
-from prometheus_client import Summary
-from prometheus_client import Counter
-from common.Settings import get_setting
-
-from monitoring.Config import DEVICE_GRPC_SERVICE_PORT, DEVICE_SERVICE_HOST
+import os, grpc, logging
+from prometheus_client import Counter, Summary
 from monitoring.proto.kpi_sample_types_pb2 import KpiSampleType
 from monitoring.service import SqliteTools, InfluxTools
 from monitoring.proto import monitoring_pb2
 from monitoring.proto import monitoring_pb2_grpc
-
 from common.rpc_method_wrapper.ServiceExceptions import ServiceException
-
 from context.proto import context_pb2
-
 from device.client.DeviceClient import DeviceClient
 from device.proto import device_pb2
 
 LOGGER = logging.getLogger(__name__)
 
-MONITORING_GETINSTANTKPI_REQUEST_TIME = Summary('monitoring_getinstantkpi_processing_seconds', 'Time spent processing monitoring instant kpi request')
+MONITORING_GETINSTANTKPI_REQUEST_TIME = Summary(
+    'monitoring_getinstantkpi_processing_seconds', 'Time spent processing monitoring instant kpi request')
 MONITORING_INCLUDEKPI_COUNTER = Counter('monitoring_includekpi_counter', 'Monitoring include kpi request counter')
 
 INFLUXDB_HOSTNAME = os.environ.get("INFLUXDB_HOSTNAME")
@@ -42,9 +34,6 @@ INFLUXDB_USER = os.environ.get("INFLUXDB_USER")
 INFLUXDB_PASSWORD = os.environ.get("INFLUXDB_PASSWORD")
 INFLUXDB_DATABASE = os.environ.get("INFLUXDB_DATABASE")
 
-DEVICE_SERVICE_HOST = get_setting('DEVICESERVICE_SERVICE_HOST',      default=DEVICE_SERVICE_HOST     )
-DEVICE_SERVICE_PORT = get_setting('DEVICESERVICE_SERVICE_PORT_GRPC', default=DEVICE_GRPC_SERVICE_PORT)
-
 
 class MonitoringServiceServicerImpl(monitoring_pb2_grpc.MonitoringServiceServicer):
     def __init__(self):
@@ -52,13 +41,14 @@ class MonitoringServiceServicerImpl(monitoring_pb2_grpc.MonitoringServiceService
 
         # Init sqlite monitoring db
         self.sql_db = SqliteTools.SQLite('monitoring.db')
-        self.deviceClient = DeviceClient(address=DEVICE_SERVICE_HOST, port=DEVICE_GRPC_SERVICE_PORT)  # instantiate the client
 
         # Create influx_db client
         self.influx_db = InfluxTools.Influx(INFLUXDB_HOSTNAME,"8086",INFLUXDB_USER,INFLUXDB_PASSWORD,INFLUXDB_DATABASE)
 
     # CreateKpi (CreateKpiRequest) returns (KpiId) {}
-    def CreateKpi(self, request : monitoring_pb2.KpiDescriptor, grpc_context : grpc.ServicerContext) -> monitoring_pb2.KpiId :
+    def CreateKpi(
+        self, request : monitoring_pb2.KpiDescriptor, grpc_context : grpc.ServicerContext
+    ) -> monitoring_pb2.KpiId:
         # CREATEKPI_COUNTER_STARTED.inc()
         LOGGER.info('CreateKpi')
         try:
@@ -71,7 +61,8 @@ class MonitoringServiceServicerImpl(monitoring_pb2_grpc.MonitoringServiceService
             kpi_endpoint_id = request.endpoint_id.endpoint_uuid.uuid
             kpi_service_id  = request.service_id.service_uuid.uuid
 
-            data = self.sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
+            data = self.sql_db.insert_KPI(
+                kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
 
             kpi_id.kpi_id.uuid = str(data)
 
@@ -87,7 +78,9 @@ class MonitoringServiceServicerImpl(monitoring_pb2_grpc.MonitoringServiceService
             grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
 
     # rpc MonitorKpi (MonitorKpiRequest) returns (context.Empty) {}
-    def MonitorKpi ( self, request : monitoring_pb2.MonitorKpiRequest, grpc_context : grpc.ServicerContext) -> context_pb2.Empty:
+    def MonitorKpi(
+        self, request : monitoring_pb2.MonitorKpiRequest, grpc_context : grpc.ServicerContext
+    ) -> context_pb2.Empty:
 
         LOGGER.info('MonitorKpi')
         try:
@@ -97,25 +90,23 @@ class MonitoringServiceServicerImpl(monitoring_pb2_grpc.MonitoringServiceService
             kpiDescriptor = self.GetKpiDescriptor(request.kpi_id, grpc_context)
 
             monitor_device_request.kpi_descriptor.CopyFrom(kpiDescriptor)
-            monitor_device_request.kpi_id.kpi_id.uuid                               = request.kpi_id.kpi_id.uuid
-            monitor_device_request.sampling_duration_s                              = request.sampling_duration_s
-            monitor_device_request.sampling_interval_s                              = request.sampling_interval_s
+            monitor_device_request.kpi_id.kpi_id.uuid  = request.kpi_id.kpi_id.uuid
+            monitor_device_request.sampling_duration_s = request.sampling_duration_s
+            monitor_device_request.sampling_interval_s = request.sampling_interval_s
 
-            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            if s.connect_ex((DEVICE_SERVICE_HOST, DEVICE_GRPC_SERVICE_PORT)) == 0:
-                self.deviceClient.MonitorDeviceKpi(monitor_device_request)
-            else:
-                LOGGER.warning('Device service is not reachable')
+            device_client = DeviceClient()
+            device_client.MonitorDeviceKpi(monitor_device_request)
 
-            return context_pb2.Empty()
         except ServiceException as e:
             LOGGER.exception('MonitorKpi exception')
             # CREATEKPI_COUNTER_FAILED.inc()
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
             LOGGER.exception('MonitorKpi exception')
+            grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
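+            # grpc_context.abort() raises an exception, so the trailing
+            # return below is only reached when the RPC succeeds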
             # CREATEKPI_COUNTER_FAILED.inc()
 
+        return context_pb2.Empty()
 
     # rpc IncludeKpi(IncludeKpiRequest)  returns(context.Empty)    {}
     def IncludeKpi(self, request : monitoring_pb2.Kpi, grpc_context : grpc.ServicerContext) -> context_pb2.Empty:
@@ -145,7 +136,7 @@ class MonitoringServiceServicerImpl(monitoring_pb2_grpc.MonitoringServiceService
             LOGGER.exception('IncludeKpi exception')
             # CREATEKPI_COUNTER_FAILED.inc()
             grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
+        except Exception:  # pragma: no cover
             LOGGER.exception('IncludeKpi exception')
             # CREATEKPI_COUNTER_FAILED.inc()
         return context_pb2.Empty()
@@ -162,7 +153,9 @@ class MonitoringServiceServicerImpl(monitoring_pb2_grpc.MonitoringServiceService
         return monitoring_pb2.Kpi()
 
 
-    def GetKpiDescriptor(self, request : monitoring_pb2.KpiId, grpc_context : grpc.ServicerContext) -> monitoring_pb2.KpiDescriptor:
+    def GetKpiDescriptor(
+        self, request : monitoring_pb2.KpiId, grpc_context : grpc.ServicerContext
+    ) -> monitoring_pb2.KpiDescriptor:
         LOGGER.info('getting Kpi by KpiID')
         try:
             kpi_db = self.sql_db.get_KPI(int(request.kpi_id.uuid))
@@ -183,5 +176,5 @@ class MonitoringServiceServicerImpl(monitoring_pb2_grpc.MonitoringServiceService
             LOGGER.exception('GetKpiDescriptor exception')
             grpc_context.abort(e.code, e.details)
 
-        except Exception as e:  # pragma: no cover
+        except Exception:  # pragma: no cover
             LOGGER.exception('GetKpiDescriptor exception')
diff --git a/src/monitoring/service/__main__.py b/src/monitoring/service/__main__.py
index 7835b4fc8a37f27419cebebfd0bf75b86820f38d..714046517e7b0ea591dce7411c5cb91ca3b867a6 100644
--- a/src/monitoring/service/__main__.py
+++ b/src/monitoring/service/__main__.py
@@ -12,44 +12,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, signal, sys, threading, socket
-
-from common.Settings import get_setting, wait_for_environment_variables
-from context.client.ContextClient import ContextClient
-from monitoring.Config import (
-    GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD, LOG_LEVEL, METRICS_PORT, CONTEXT_GRPC_SERVICE_PORT,
-    CONTEXT_SERVICE_HOST)
-
-from monitoring.client.monitoring_client import MonitoringClient
-from monitoring.proto import monitoring_pb2
-from monitoring.service.EventTools import EventsDeviceCollector
-from monitoring.service.MonitoringService import MonitoringService
-
+import logging, signal, sys, threading
 from prometheus_client import start_http_server
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+    wait_for_environment_variables)
+from monitoring.proto import monitoring_pb2
+from .EventTools import EventsDeviceCollector
+from .MonitoringService import MonitoringService
 
 terminate = threading.Event()
 LOGGER = None
-LOCALHOST = '127.0.0.1'
 
-def signal_handler(signal, frame):
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
     LOGGER.warning('Terminate signal received')
     terminate.set()
 
 def start_monitoring():
     LOGGER.info('Start Monitoring...',)
 
-    grpc_service_port    = get_setting('MONITORINGSERVICE_SERVICE_PORT_GRPC', default=GRPC_SERVICE_PORT        )
-    context_service_host = get_setting('CONTEXTSERVICE_SERVICE_HOST',         default=CONTEXT_SERVICE_HOST     )
-    context_service_port = get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC',    default=CONTEXT_GRPC_SERVICE_PORT)
-
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    if s.connect_ex((context_service_host, int(context_service_port))) != 0:
-        LOGGER.info('Context service is not reachable')
-        return
-
-    context_client_grpc = ContextClient(address=context_service_host, port=context_service_port)
-    monitoring_client = MonitoringClient(server=LOCALHOST, port=grpc_service_port)  # instantiate the client
-    events_collector = EventsDeviceCollector(context_client_grpc, monitoring_client)
+    events_collector = EventsDeviceCollector()
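+    # the collector now presumably builds its own Context/Monitoring clients
+    # internally, replacing the explicitly wired clients removed above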
     events_collector.start()
 
     # Iterate while terminate is not set
@@ -64,8 +47,7 @@ def start_monitoring():
                 monitor_kpi_request.kpi_id.CopyFrom(kpi_id)
                 monitor_kpi_request.sampling_duration_s = 86400
                 monitor_kpi_request.sampling_interval_s = 30
-
-                monitoring_client.MonitorKpi(monitor_kpi_request)
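+                # reuse the collector's internal client rather than opening a
+                # second channel via a standalone MonitoringClient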
+                events_collector._monitoring_client.MonitorKpi(monitor_kpi_request)
     else:
         # Terminate is set, looping terminates
         LOGGER.warning("Stopping execution...")
@@ -73,31 +55,28 @@ def start_monitoring():
     events_collector.stop()
 
 def main():
-    global LOGGER
-
-    grpc_service_port = get_setting('MONITORINGSERVICE_SERVICE_PORT_GRPC', default=GRPC_SERVICE_PORT)
-    max_workers       = get_setting('MAX_WORKERS',                         default=GRPC_MAX_WORKERS )
-    grace_period      = get_setting('GRACE_PERIOD',                        default=GRPC_GRACE_PERIOD)
-    log_level         = get_setting('LOG_LEVEL',                           default=LOG_LEVEL        )
-    metrics_port      = get_setting('METRICS_PORT',                        default=METRICS_PORT     )
+    global LOGGER # pylint: disable=global-statement
 
+    log_level = get_log_level()
     logging.basicConfig(level=log_level)
     LOGGER = logging.getLogger(__name__)
 
     wait_for_environment_variables([
-        'CONTEXTSERVICE_SERVICE_HOST', 'CONTEXTSERVICE_SERVICE_PORT_GRPC',
-        'DEVICESERVICE_SERVICE_HOST', 'DEVICESERVICE_SERVICE_PORT_GRPC'
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
     signal.signal(signal.SIGINT,  signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
     LOGGER.info('Starting...')
+
     # Start metrics server
+    metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
     # Starting monitoring service
-    grpc_service = MonitoringService(port=grpc_service_port, max_workers=max_workers, grace_period=grace_period)
+    grpc_service = MonitoringService()
     grpc_service.start()
 
     start_monitoring()
@@ -112,4 +91,4 @@ def main():
     return 0
 
 if __name__ == '__main__':
-    sys.exit(main())
\ No newline at end of file
+    sys.exit(main())
diff --git a/src/monitoring/tests/Messages.py b/src/monitoring/tests/Messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd4db01fdd6f94e0e376129b90c89b6475b48449
--- /dev/null
+++ b/src/monitoring/tests/Messages.py
@@ -0,0 +1,49 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from monitoring.proto import monitoring_pb2
+from monitoring.proto.kpi_sample_types_pb2 import KpiSampleType
+
+def kpi():
+    _kpi                    = monitoring_pb2.Kpi()
+    _kpi.kpi_id.kpi_id.uuid = 'KPIID0000'   # pylint: disable=maybe-no-member
+    return _kpi
+
+def kpi_id():
+    _kpi_id             = monitoring_pb2.KpiId()
+    _kpi_id.kpi_id.uuid = str(1)            # pylint: disable=maybe-no-member
+    return _kpi_id
+
+def create_kpi_request():
+    _create_kpi_request                                = monitoring_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_description                = 'KPI Description Test'
+    _create_kpi_request.kpi_sample_type                = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED
+    _create_kpi_request.device_id.device_uuid.uuid     = 'DEV1'     # pylint: disable=maybe-no-member
+    _create_kpi_request.service_id.service_uuid.uuid   = 'SERV1'    # pylint: disable=maybe-no-member
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END1'     # pylint: disable=maybe-no-member
+    return _create_kpi_request
+
+def monitor_kpi_request(kpi_uuid, sampling_duration_s, sampling_interval_s):
+    _monitor_kpi_request                     = monitoring_pb2.MonitorKpiRequest()
+    _monitor_kpi_request.kpi_id.kpi_id.uuid  = kpi_uuid   # pylint: disable=maybe-no-member
+    _monitor_kpi_request.sampling_duration_s = sampling_duration_s
+    _monitor_kpi_request.sampling_interval_s = sampling_interval_s
+    return _monitor_kpi_request
+
+def include_kpi_request():
+    _include_kpi_request                    = monitoring_pb2.Kpi()
+    _include_kpi_request.kpi_id.kpi_id.uuid = str(1)    # pylint: disable=maybe-no-member
+    _include_kpi_request.timestamp          = "2021-10-12T13:14:42Z"
+    _include_kpi_request.kpi_value.intVal   = 500       # pylint: disable=maybe-no-member
+    return _include_kpi_request
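+
+# Note: unlike the session-scoped fixtures they replace, these factories build
+# a fresh protobuf message on every call, so one test cannot mutate the
+# request seen by another.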
diff --git a/src/monitoring/tests/Objects.py b/src/monitoring/tests/Objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..852da6bc98dee441a1e21e60e1b3ae7436b98ebf
--- /dev/null
+++ b/src/monitoring/tests/Objects.py
@@ -0,0 +1,30 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.tools.object_factory.Device import (
+    json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled)
+from context.proto.kpi_sample_types_pb2 import KpiSampleType
+
+PACKET_PORT_SAMPLE_TYPES = [
+    KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED,
+    KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED,
+    KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED,
+    KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED,
+]
+
+DEVICE_DEV1_UUID          = 'DEV1'
+ENDPOINT_END1_UUID        = 'END1'
+DEVICE_DEV1_ENDPOINT_DEFS = [(ENDPOINT_END1_UUID, 'copper', PACKET_PORT_SAMPLE_TYPES)]
+DEVICE_DEV1               = json_device_emulated_packet_router_disabled(DEVICE_DEV1_UUID)
+DEVICE_DEV1_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_DEV1_ENDPOINT_DEFS)
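+
+# Usage sketch (mirrors the tests in test_unitary.py): the connect rules are
+# merged into a deep copy of the device descriptor before it is sent to the
+# Device service:
+#
+#     device = copy.deepcopy(DEVICE_DEV1)
+#     device['device_config']['config_rules'].extend(DEVICE_DEV1_CONNECT_RULES)
+#     device_client.AddDevice(Device(**device))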
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index 0701c5ce8bc56ee34d0a90760b0d6e88fdbbee42..d3799689eee4a1a12f4d4d7998d92482c59adb16 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -12,36 +12,35 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-import os
-import socket
-import pytest
+import copy, logging, os, pytest
 from typing import Tuple
-
-
-from monitoring.client.monitoring_client import MonitoringClient
-from monitoring.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD, DEVICE_GRPC_SERVICE_PORT
-from monitoring.proto import context_pb2, monitoring_pb2
-from monitoring.proto.kpi_sample_types_pb2 import KpiSampleType
-from monitoring.service import SqliteTools, InfluxTools
-from monitoring.service.MonitoringService import MonitoringService
-from monitoring.service.EventTools import EventsDeviceCollector
-
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
 from common.orm.Database import Database
 from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum
 from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
 from common.message_broker.MessageBroker import MessageBroker
 
-from context.Config import (
-    GRPC_SERVICE_PORT as grpc_port_context,
-    GRPC_MAX_WORKERS as grpc_workers_context,
-    GRPC_GRACE_PERIOD as grpc_grace_context
-)
 from context.client.ContextClient import ContextClient
 from context.service.grpc_server.ContextService import ContextService
-from context.service.Populate import populate
 from context.proto.context_pb2 import EventTypeEnum, DeviceEvent, Device
-from context.tests.Objects import (DEVICE_R1, DEVICE_R1_UUID)
+
+from device.client.DeviceClient import DeviceClient
+from device.service.DeviceService import DeviceService
+from device.service.driver_api.DriverFactory import DriverFactory
+from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
+from device.service.drivers import DRIVERS
+
+from monitoring.client.MonitoringClient import MonitoringClient
+from monitoring.proto import context_pb2, monitoring_pb2
+from monitoring.proto.kpi_sample_types_pb2 import KpiSampleType
+from monitoring.service import SqliteTools, InfluxTools
+from monitoring.service.MonitoringService import MonitoringService
+from monitoring.service.EventTools import EventsDeviceCollector
+from monitoring.tests.Messages import create_kpi_request, include_kpi_request, kpi, kpi_id, monitor_kpi_request
+from monitoring.tests.Objects import DEVICE_DEV1, DEVICE_DEV1_CONNECT_RULES, DEVICE_DEV1_UUID
+
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
@@ -50,18 +49,19 @@ LOGGER.setLevel(logging.DEBUG)
 # Tests Setup
 ###########################
 
-SERVER_ADDRESS = '127.0.0.1'
-LISTEN_ADDRESS = '[::]'
-GRPC_PORT_MONITORING = 7070
+LOCAL_HOST = '127.0.0.1'
 
-GRPC_PORT_CONTEXT    = 10000 + grpc_port_context    # avoid privileged ports
-DEVICE_GRPC_SERVICE_PORT = 10000 + DEVICE_GRPC_SERVICE_PORT # avoid privileged ports
-MONITORING_GRPC_SERVICE_PORT = GRPC_PORT_MONITORING # avoid privileged ports
+CONTEXT_SERVICE_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.CONTEXT) # avoid privileged ports
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(CONTEXT_SERVICE_PORT)
 
+DEVICE_SERVICE_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.DEVICE) # avoid privileged ports
+os.environ[get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(DEVICE_SERVICE_PORT)
 
-SCENARIOS = [ # comment/uncomment scenarios to activate/deactivate them in the test unit
-    ('all_inmemory', DatabaseBackendEnum.INMEMORY, {},           MessageBrokerBackendEnum.INMEMORY, {}          ),
-]
+MONITORING_SERVICE_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.MONITORING) # avoid privileged ports
+os.environ[get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(MONITORING_SERVICE_PORT)
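+
+# The variables above must be set before the fixtures run: the parameterless
+# clients and services used below resolve their endpoints from them.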
 
 INFLUXDB_HOSTNAME = os.environ.get("INFLUXDB_HOSTNAME")
 INFLUXDB_PORT = os.environ.get("INFLUXDB_PORT")
@@ -69,49 +69,61 @@ INFLUXDB_USER = os.environ.get("INFLUXDB_USER")
 INFLUXDB_PASSWORD = os.environ.get("INFLUXDB_PASSWORD")
 INFLUXDB_DATABASE = os.environ.get("INFLUXDB_DATABASE")
 
-@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS)
-def context_db_mb(request) -> Tuple[Database, MessageBroker]:
-    name,db_backend,db_settings,mb_backend,mb_settings = request.param
-    msg = 'Running scenario {:s} db_backend={:s}, db_settings={:s}, mb_backend={:s}, mb_settings={:s}...'
-    LOGGER.info(msg.format(str(name), str(db_backend.value), str(db_settings), str(mb_backend.value), str(mb_settings)))
-    _database = Database(get_database_backend(backend=db_backend, **db_settings))
-    _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings))
+@pytest.fixture(scope='session')
+def context_db_mb() -> Tuple[Database, MessageBroker]:
+    _database = Database(get_database_backend(backend=DatabaseBackendEnum.INMEMORY))
+    _message_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY))
     yield _database, _message_broker
     _message_broker.terminate()
 
 @pytest.fixture(scope='session')
-def context_service_grpc(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name
-    database = context_db_mb[0]
+def context_service(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name
+    database, message_broker = context_db_mb
     database.clear_all()
-    _service = ContextService(
-        database, context_db_mb[1], port=GRPC_PORT_CONTEXT, max_workers=grpc_workers_context,
-        grace_period=grpc_grace_context)
+    _service = ContextService(database, message_broker)
     _service.start()
     yield _service
     _service.stop()
 
 @pytest.fixture(scope='session')
-def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name
-    _client = ContextClient(address='localhost', port=GRPC_PORT_CONTEXT)
+def context_client(context_service : ContextService): # pylint: disable=redefined-outer-name
+    _client = ContextClient()
     yield _client
     _client.close()
 
-
-# This fixture will be requested by test cases and last during testing session
 @pytest.fixture(scope='session')
-def monitoring_service():
-    LOGGER.warning('monitoring_service begin')
+def device_service(context_service : ContextService): # pylint: disable=redefined-outer-name
+    LOGGER.info('Initializing DeviceService...')
+    driver_factory = DriverFactory(DRIVERS)
+    driver_instance_cache = DriverInstanceCache(driver_factory)
+    _service = DeviceService(driver_instance_cache)
+    _service.start()
 
-    service_port    = GRPC_SERVICE_PORT
-    max_workers     = GRPC_MAX_WORKERS
-    grace_period    = GRPC_GRACE_PERIOD
+    # yield the service; when the test finishes, execution resumes to stop it
+    LOGGER.info('Yielding DeviceService...')
+    yield _service
+
+    LOGGER.info('Terminating DeviceService...')
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name
+    _client = DeviceClient()
+    yield _client
+    _client.close()
 
+# This fixture will be requested by test cases and will last for the testing session
+@pytest.fixture(scope='session')
+def monitoring_service(
+        context_service : ContextService,  # pylint: disable=redefined-outer-name
+        device_service : DeviceService     # pylint: disable=redefined-outer-name
+    ):
     LOGGER.info('Initializing MonitoringService...')
-    _service = MonitoringService(port=service_port, max_workers=max_workers, grace_period=grace_period)
+    _service = MonitoringService()
     _service.start()
 
     # yield the service; when the test finishes, execution resumes to stop it
-    LOGGER.warning('monitoring_service yielding')
+    LOGGER.info('Yielding MonitoringService...')
     yield _service
 
     LOGGER.info('Terminating MonitoringService...')
@@ -120,224 +132,187 @@ def monitoring_service():
 # This fixture will be requested by test cases and will last for the testing session.
 # The client requires the server, so the client fixture depends on the server fixture.
 @pytest.fixture(scope='session')
-def monitoring_client(monitoring_service):
-    LOGGER.warning('monitoring_client begin')
-    client = MonitoringClient(server=SERVER_ADDRESS, port=GRPC_PORT_MONITORING)  # instantiate the client
-    LOGGER.warning('monitoring_client returning')
-    return client
+def monitoring_client(monitoring_service : MonitoringService): # pylint: disable=redefined-outer-name
+    LOGGER.info('Initializing MonitoringClient...')
+    _client = MonitoringClient()
 
-# This fixture will be requested by test cases and last during testing session.
-@pytest.fixture(scope='session')
-def kpi():
-    LOGGER.warning('test_include_kpi begin')
-    # form request
-    kpi                     = monitoring_pb2.Kpi()
-    kpi.kpi_id.kpi_id.uuid  = 'KPIID0000'
-    kpi.kpiDescription      = 'KPI Desc'
-    return kpi
-
-@pytest.fixture(scope='session')
-def kpi_id():
-    LOGGER.warning('test_include_kpi begin')
-
-    # form request
-    kpi_id              = monitoring_pb2.KpiId()
-    kpi_id.kpi_id.uuid  = str(1)
+    # yield the client; when the test finishes, execution resumes to close it
+    LOGGER.info('Yielding MonitoringClient...')
+    yield _client
 
-    return kpi_id
+    LOGGER.info('Closing MonitoringClient...')
+    _client.close()
 
 @pytest.fixture(scope='session')
 def sql_db():
-    sql_db = SqliteTools.SQLite('monitoring.db')
-    return sql_db
+    _sql_db = SqliteTools.SQLite('monitoring.db')
+    return _sql_db
 
 @pytest.fixture(scope='session')
 def influx_db():
-    influx_db = InfluxTools.Influx(INFLUXDB_HOSTNAME, INFLUXDB_PORT, INFLUXDB_USER, INFLUXDB_PASSWORD, INFLUXDB_DATABASE)
-    return influx_db
-
-@pytest.fixture(scope='session')
-def create_kpi_request():
-    LOGGER.warning('test_include_kpi begin')
-
-    create_kpi_request                                  = monitoring_pb2.KpiDescriptor()
-    create_kpi_request.kpi_description                  = 'KPI Description Test'
-    create_kpi_request.kpi_sample_type                  = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED
-    create_kpi_request.device_id.device_uuid.uuid       = 'DEV1'  # pylint: disable=maybe-no-member
-    create_kpi_request.service_id.service_uuid.uuid     = "SERV1"
-    create_kpi_request.endpoint_id.endpoint_uuid.uuid   = "END1"
-
-    return create_kpi_request
-
-@pytest.fixture(scope='session')
-def monitor_kpi_request():
-    LOGGER.warning('test_monitor_kpi begin')
-
-    monitor_kpi_request                     = monitoring_pb2.MonitorKpiRequest()
-    monitor_kpi_request.kpi_id.kpi_id.uuid  = str(1)
-    monitor_kpi_request.sampling_duration_s = 120
-    monitor_kpi_request.sampling_interval_s = 5
-
-    return monitor_kpi_request
-
-
-@pytest.fixture(scope='session')
-def include_kpi_request():
-    LOGGER.warning('test_include_kpi begin')
-
-    include_kpi_request                     = monitoring_pb2.Kpi()
-    include_kpi_request.kpi_id.kpi_id.uuid  = str(1)
-    include_kpi_request.timestamp           = "2021-10-12T13:14:42Z"
-    include_kpi_request.kpi_value.intVal    = 500
-
-    return include_kpi_request
-
-@pytest.fixture(scope='session')
-def address():
-    address = '127.0.0.1'
-    return address
+    _influx_db = InfluxTools.Influx(
+        INFLUXDB_HOSTNAME, INFLUXDB_PORT, INFLUXDB_USER, INFLUXDB_PASSWORD, INFLUXDB_DATABASE)
+    return _influx_db
 
-@pytest.fixture(scope='session')
-def port():
-    port = 7070
-    return port
 
 ###########################
 # Tests Implementation
 ###########################
 
 # Test case that makes use of client fixture to test server's CreateKpi method
-def test_create_kpi(monitoring_client,create_kpi_request):
+def test_create_kpi(monitoring_client): # pylint: disable=redefined-outer-name
     # make call to server
     LOGGER.warning('test_create_kpi requesting')
-    response = monitoring_client.CreateKpi(create_kpi_request)
+    response = monitoring_client.CreateKpi(create_kpi_request())
     LOGGER.debug(str(response))
     assert isinstance(response, monitoring_pb2.KpiId)
 
 # Test case that makes use of client fixture to test server's MonitorKpi method
-def test_monitor_kpi(monitoring_client,create_kpi_request):
+def test_monitor_kpi(
+        context_client : ContextClient,                 # pylint: disable=redefined-outer-name
+        device_client : DeviceClient,                   # pylint: disable=redefined-outer-name
+        monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name
+        context_db_mb : Tuple[Database, MessageBroker]  # pylint: disable=redefined-outer-name
+    ):
     LOGGER.warning('test_monitor_kpi begin')
 
-    response = monitoring_client.CreateKpi(create_kpi_request)
+    context_database = context_db_mb[0]
 
-    monitor_kpi_request                     = monitoring_pb2.MonitorKpiRequest()
-    monitor_kpi_request.kpi_id.kpi_id.uuid  = response.kpi_id.uuid
-    monitor_kpi_request.sampling_duration_s = 120
-    monitor_kpi_request.sampling_interval_s = 5
+    # ----- Clean the database -----------------------------------------------------------------------------------------
+    context_database.clear_all()
 
-    response = monitoring_client.MonitorKpi(monitor_kpi_request)
+    # ----- Dump state of database before creating the object ----------------------------------------------------------
+    db_entries = context_database.dump()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+    # ----- Add the device ---------------------------------------------------------------------------------------------
+    LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID))
+    device_with_connect_rules = copy.deepcopy(DEVICE_DEV1)
+    device_with_connect_rules['device_config']['config_rules'].extend(DEVICE_DEV1_CONNECT_RULES)
+    response = device_client.AddDevice(Device(**device_with_connect_rules))
+    assert response.device_uuid.uuid == DEVICE_DEV1_UUID
+
+    response = monitoring_client.CreateKpi(create_kpi_request())
+    _monitor_kpi_request = monitor_kpi_request(response.kpi_id.uuid, 120, 5) # pylint: disable=maybe-no-member
+    response = monitoring_client.MonitorKpi(_monitor_kpi_request)
     LOGGER.debug(str(response))
     assert isinstance(response, context_pb2.Empty)
 
 
 # Test case that makes use of client fixture to test server's IncludeKpi method
-def test_include_kpi(monitoring_client,include_kpi_request):
+def test_include_kpi(monitoring_client): # pylint: disable=redefined-outer-name
     # make call to server
     LOGGER.warning('test_include_kpi requesting')
-    response = monitoring_client.IncludeKpi(include_kpi_request)
+    response = monitoring_client.IncludeKpi(include_kpi_request())
     LOGGER.debug(str(response))
     assert isinstance(response, context_pb2.Empty)
 
 # Test case that makes use of client fixture to test server's GetStreamKpi method
-def test_get_stream_kpi(monitoring_client,include_kpi_request):
+def test_get_stream_kpi(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_getstream_kpi begin')
-    response = monitoring_client.GetStreamKpi(kpi)
+    response = monitoring_client.GetStreamKpi(kpi())
     LOGGER.debug(str(response))
     #assert isinstance(response, monitoring_pb2.Kpi)
 
 # Test case that makes use of client fixture to test server's GetInstantKpi method
-def test_get_instant_kpi(monitoring_client,kpi_id):
+def test_get_instant_kpi(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_getinstant_kpi begin')
-    response = monitoring_client.GetInstantKpi(kpi_id)
+    response = monitoring_client.GetInstantKpi(kpi_id())
     LOGGER.debug(str(response))
     assert isinstance(response, monitoring_pb2.Kpi)
 
 # Test case that makes use of client fixture to test server's GetKpiDescriptor method
-def test_get_kpidescritor_kpi(monitoring_client,create_kpi_request):
+def test_get_kpidescritor_kpi(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_getkpidescritor_kpi begin')
-
-    response = monitoring_client.CreateKpi(create_kpi_request)
-
+    response = monitoring_client.CreateKpi(create_kpi_request())
     response = monitoring_client.GetKpiDescriptor(response)
     LOGGER.debug(str(response))
     assert isinstance(response, monitoring_pb2.KpiDescriptor)
 
-def test_sqlitedb_tools_insert_kpi(sql_db, create_kpi_request):
+def test_sqlitedb_tools_insert_kpi(sql_db): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_sqlitedb_tools_insert_kpi begin')
-
-    kpi_description = create_kpi_request.kpi_description
-    kpi_sample_type = create_kpi_request.kpi_sample_type
-    kpi_device_id = create_kpi_request.device_id.device_uuid.uuid
-    kpi_endpoint_id = create_kpi_request.endpoint_id.endpoint_uuid.uuid
-    kpi_service_id = create_kpi_request.service_id.service_uuid.uuid
+    _create_kpi_request = create_kpi_request()
+    kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
+    kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
+    kpi_device_id   = _create_kpi_request.device_id.device_uuid.uuid     # pylint: disable=maybe-no-member
+    kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
+    kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
 
     response = sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
     assert isinstance(response, int)
 
-def test_sqlitedb_tools_get_kpi(sql_db, create_kpi_request):
+def test_sqlitedb_tools_get_kpi(sql_db): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_sqlitedb_tools_get_kpi begin')
-
-    kpi_description = create_kpi_request.kpi_description
-    kpi_sample_type = create_kpi_request.kpi_sample_type
-    kpi_device_id = create_kpi_request.device_id.device_uuid.uuid
-    kpi_endpoint_id = create_kpi_request.endpoint_id.endpoint_uuid.uuid
-    kpi_service_id = create_kpi_request.service_id.service_uuid.uuid
-
-    kpi_id = sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
-
-    response = sql_db.get_KPI(kpi_id)
+    _create_kpi_request = create_kpi_request()
+    kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
+    kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
+    kpi_device_id   = _create_kpi_request.device_id.device_uuid.uuid     # pylint: disable=maybe-no-member
+    kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
+    kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
+
+    _kpi_id = sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
+    response = sql_db.get_KPI(_kpi_id)
     assert isinstance(response, tuple)
 
-def test_sqlitedb_tools_get_kpis(sql_db):
+def test_sqlitedb_tools_get_kpis(sql_db): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_sqlitedb_tools_get_kpis begin')
     response = sql_db.get_KPIS()
     assert isinstance(response, list)
 
-def test_sqlitedb_tools_delete_kpi(sql_db, create_kpi_request):
+def test_sqlitedb_tools_delete_kpi(sql_db): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_sqlitedb_tools_delete_kpi begin')
 
     response = sql_db.delete_KPI("DEV1",KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED)
 
-    if response == False:
-        kpi_description = create_kpi_request.kpi_description
-        kpi_sample_type = create_kpi_request.kpi_sample_type
-        kpi_device_id = create_kpi_request.device_id.device_uuid.uuid
-        kpi_endpoint_id = create_kpi_request.endpoint_id.endpoint_uuid.uuid
-        kpi_service_id = create_kpi_request.service_id.service_uuid.uuid
+    if not response:
+        _create_kpi_request = create_kpi_request()
+        kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
+        kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
+        kpi_device_id   = _create_kpi_request.device_id.device_uuid.uuid     # pylint: disable=maybe-no-member
+        kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
+        kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
 
         sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
         response = sql_db.delete_KPI("DEV1", KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED)
 
-    assert response == True
+    assert response
 
-def test_sqlitedb_tools_delete_kpid_id(sql_db, create_kpi_request):
+def test_sqlitedb_tools_delete_kpid_id(sql_db): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_sqlitedb_tools_delete_kpid_id begin')
 
     response = sql_db.delete_kpid_id(1)
 
-    if response == False:
-        kpi_description = create_kpi_request.kpi_description
-        kpi_sample_type = create_kpi_request.kpi_sample_type
-        kpi_device_id = create_kpi_request.device_id.device_uuid.uuid
-        kpi_endpoint_id = create_kpi_request.endpoint_id.endpoint_uuid.uuid
-        kpi_service_id = create_kpi_request.service_id.service_uuid.uuid
+    if not response:
+        _create_kpi_request = create_kpi_request()
+        kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
+        kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
+        kpi_device_id   = _create_kpi_request.device_id.device_uuid.uuid     # pylint: disable=maybe-no-member
+        kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
+        kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
 
-        kpi_id = sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
-        response = sql_db.delete_kpid_id(kpi_id)
+        _kpi_id = sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
+        response = sql_db.delete_kpid_id(_kpi_id)
 
-    assert response == True
+    assert response
 
 
-def test_influxdb_tools_write_kpi(influx_db):
+def test_influxdb_tools_write_kpi(influx_db): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_influxdb_tools_write_kpi begin')
 
-def test_influxdb_tools_read_kpi_points(influx_db):
+def test_influxdb_tools_read_kpi_points(influx_db): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_influxdb_tools_read_kpi_points begin')
 
 
-def test_events_tools(context_client_grpc: ContextClient,  # pylint: disable=redefined-outer-name
-    monitoring_client : MonitoringClient,
-    context_db_mb: Tuple[Database, MessageBroker]):
+def test_events_tools(
+        context_client : ContextClient,                 # pylint: disable=redefined-outer-name
+        device_client : DeviceClient,                   # pylint: disable=redefined-outer-name
+        monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name
+        context_db_mb : Tuple[Database, MessageBroker]  # pylint: disable=redefined-outer-name
+    ):
     LOGGER.warning('test_events_tools begin')
 
     context_database = context_db_mb[0]
@@ -346,10 +321,10 @@ def test_events_tools(context_client_grpc: ContextClient,  # pylint: disable=red
     context_database.clear_all()
 
     # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsDeviceCollector(context_client_grpc, monitoring_client)
+    events_collector = EventsDeviceCollector()
     events_collector.start()
 
-    # # ----- Dump state of database before create the object ------------------------------------------------------------
+    # ----- Dump state of database before creating the object ----------------------------------------------------------
     db_entries = context_database.dump()
     LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
     for db_entry in db_entries:
@@ -357,18 +332,22 @@ def test_events_tools(context_client_grpc: ContextClient,  # pylint: disable=red
     LOGGER.info('-----------------------------------------------------------')
     assert len(db_entries) == 0
 
-    populate('localhost', GRPC_PORT_CONTEXT) # place this call in the appropriate line, according to your tests
-
     # ----- Add the device ---------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
-    assert response.device_uuid.uuid == DEVICE_R1_UUID
+    LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID))
+    device_with_connect_rules = copy.deepcopy(DEVICE_DEV1)
+    device_with_connect_rules['device_config']['config_rules'].extend(DEVICE_DEV1_CONNECT_RULES)
+    response = device_client.AddDevice(Device(**device_with_connect_rules))
+    assert response.device_uuid.uuid == DEVICE_DEV1_UUID
 
     events_collector.stop()
 
 
-def test_get_device_events(context_client_grpc: ContextClient,  # pylint: disable=redefined-outer-name
-    monitoring_client : MonitoringClient,
-    context_db_mb: Tuple[Database, MessageBroker]):
+def test_get_device_events(
+        context_client : ContextClient,                 # pylint: disable=redefined-outer-name
+        device_client : DeviceClient,                   # pylint: disable=redefined-outer-name
+        monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name
+        context_db_mb : Tuple[Database, MessageBroker]  # pylint: disable=redefined-outer-name
+    ):
 
     LOGGER.warning('test_get_device_events begin')
 
@@ -378,10 +357,10 @@ def test_get_device_events(context_client_grpc: ContextClient,  # pylint: disabl
     context_database.clear_all()
 
     # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsDeviceCollector(context_client_grpc,monitoring_client)
+    events_collector = EventsDeviceCollector()
     events_collector.start()
 
-    # # ----- Dump state of database before create the object ------------------------------------------------------------
+    # ----- Dump state of database before creating the object ----------------------------------------------------------
     db_entries = context_database.dump()
     LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
     for db_entry in db_entries:
@@ -389,20 +368,26 @@ def test_get_device_events(context_client_grpc: ContextClient,  # pylint: disabl
     LOGGER.info('-----------------------------------------------------------')
     assert len(db_entries) == 0
 
-    populate('localhost', GRPC_PORT_CONTEXT) # place this call in the appropriate line, according to your tests
-
     # ----- Check create event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
+    LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID))
+    device_with_connect_rules = copy.deepcopy(DEVICE_DEV1)
+    device_with_connect_rules['device_config']['config_rules'].extend(DEVICE_DEV1_CONNECT_RULES)
+    response = device_client.AddDevice(Device(**device_with_connect_rules))
+    assert response.device_uuid.uuid == DEVICE_DEV1_UUID
 
+    event = events_collector.get_event(block=True)
     assert isinstance(event, DeviceEvent)
     assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID
+    assert event.device_id.device_uuid.uuid == DEVICE_DEV1_UUID
 
     events_collector.stop()
 
-def test_listen_events(monitoring_client: MonitoringClient,
-    context_client_grpc: ContextClient,  # pylint: disable=redefined-outer-name
-    context_db_mb: Tuple[Database, MessageBroker]):
+def test_listen_events(
+        context_client : ContextClient,                 # pylint: disable=redefined-outer-name
+        device_client : DeviceClient,                   # pylint: disable=redefined-outer-name
+        monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name
+        context_db_mb : Tuple[Database, MessageBroker]  # pylint: disable=redefined-outer-name
+    ):
 
     LOGGER.warning('test_listen_events begin')
 
@@ -412,10 +397,10 @@ def test_listen_events(monitoring_client: MonitoringClient,
     context_database.clear_all()
 
     # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsDeviceCollector(context_client_grpc,monitoring_client)
+    events_collector = EventsDeviceCollector()
     events_collector.start()
 
-    # # ----- Dump state of database before create the object ------------------------------------------------------------
+    # ----- Dump state of database before creating the object ----------------------------------------------------------
     db_entries = context_database.dump()
     LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
     for db_entry in db_entries:
@@ -423,18 +408,12 @@ def test_listen_events(monitoring_client: MonitoringClient,
     LOGGER.info('-----------------------------------------------------------')
     assert len(db_entries) == 0
 
-    populate('localhost', GRPC_PORT_CONTEXT) # place this call in the appropriate line, according to your tests
+    LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID))
+    device_with_connect_rules = copy.deepcopy(DEVICE_DEV1)
+    device_with_connect_rules['device_config']['config_rules'].extend(DEVICE_DEV1_CONNECT_RULES)
+    response = device_client.AddDevice(Device(**device_with_connect_rules))
+    assert response.device_uuid.uuid == DEVICE_DEV1_UUID
 
     kpi_id_list = events_collector.listen_events()
 
-    assert bool(kpi_id_list) == True
-
-def test_socket_ports(address, port):
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    result = s.connect_ex((address,port))
-
-    if result == 0:
-        print('socket is open')
-    else:
-        print('socket is not open')
-    s.close()
+    assert len(kpi_id_list) > 0
diff --git a/src/opticalcentralizedattackdetector/service/OpticalCentralizedAttackDetectorServiceServicerImpl.py b/src/opticalcentralizedattackdetector/service/OpticalCentralizedAttackDetectorServiceServicerImpl.py
index 9c77a959cbdc484af143015a2868121af3845b0a..d4c71476f016081f7d230a3cfe87e73b35654987 100644
--- a/src/opticalcentralizedattackdetector/service/OpticalCentralizedAttackDetectorServiceServicerImpl.py
+++ b/src/opticalcentralizedattackdetector/service/OpticalCentralizedAttackDetectorServiceServicerImpl.py
@@ -16,11 +16,8 @@ import os, grpc, logging, random
 from influxdb import InfluxDBClient
 from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
 from context.client.ContextClient import ContextClient
-from context.Config import GRPC_SERVICE_PORT as CONTEXT_GRPC_SERVICE_PORT
-from monitoring.client.monitoring_client import MonitoringClient
-from monitoring.Config import GRPC_SERVICE_PORT as MONITORING_GRPC_SERVICE_PORT
+from monitoring.client.MonitoringClient import MonitoringClient
 from service.client.ServiceClient import ServiceClient
-from service.Config import GRPC_SERVICE_PORT as SERVICE_GRPC_SERVICE_PORT
 from dbscanserving.proto.dbscanserving_pb2 import DetectionRequest, DetectionResponse, Sample
 from dbscanserving.client.DbscanServingClient import DbscanServingClient
 from dbscanserving.Config import GRPC_SERVICE_PORT as DBSCANSERVING_GRPC_SERVICE_PORT
@@ -35,8 +32,7 @@ from opticalcentralizedattackdetector.proto.monitoring_pb2 import KpiList
 from opticalcentralizedattackdetector.proto.optical_centralized_attack_detector_pb2_grpc import (
     OpticalCentralizedAttackDetectorServiceServicer)
 from opticalcentralizedattackdetector.Config import (
-    CONTEXT_SERVICE_ADDRESS, SERVICE_SERVICE_ADDRESS, INFERENCE_SERVICE_ADDRESS, MONITORING_SERVICE_ADDRESS,
-    ATTACK_MITIGATOR_SERVICE_ADDRESS)
+    INFERENCE_SERVICE_ADDRESS, MONITORING_SERVICE_ADDRESS, ATTACK_MITIGATOR_SERVICE_ADDRESS)
 
 
 LOGGER = logging.getLogger(__name__)
@@ -49,12 +45,16 @@ INFLUXDB_HOSTNAME = os.environ.get("INFLUXDB_HOSTNAME")
 INFLUXDB_USER = os.environ.get("INFLUXDB_USER")
 INFLUXDB_PASSWORD = os.environ.get("INFLUXDB_PASSWORD")
 INFLUXDB_DATABASE = os.environ.get("INFLUXDB_DATABASE")
-context_client: ContextClient = ContextClient(address=CONTEXT_SERVICE_ADDRESS, port=CONTEXT_GRPC_SERVICE_PORT)
-influxdb_client: InfluxDBClient = InfluxDBClient(host=MONITORING_SERVICE_ADDRESS, port=8086, username=INFLUXDB_USER, password=INFLUXDB_PASSWORD, database=INFLUXDB_DATABASE)
-monitoring_client: MonitoringClient = MonitoringClient(server=MONITORING_SERVICE_ADDRESS, port=MONITORING_GRPC_SERVICE_PORT)
-dbscanserving_client: DbscanServingClient = DbscanServingClient(address=INFERENCE_SERVICE_ADDRESS, port=DBSCANSERVING_GRPC_SERVICE_PORT)
-service_client: ServiceClient = ServiceClient(SERVICE_SERVICE_ADDRESS, SERVICE_GRPC_SERVICE_PORT)
-attack_mitigator_client: OpticalAttackMitigatorClient = OpticalAttackMitigatorClient(address=ATTACK_MITIGATOR_SERVICE_ADDRESS, port=ATTACK_MITIGATOR_GRPC_SERVICE_PORT)
+context_client: ContextClient = ContextClient()
+influxdb_client: InfluxDBClient = InfluxDBClient(
+    host=MONITORING_SERVICE_ADDRESS, port=8086, username=INFLUXDB_USER, password=INFLUXDB_PASSWORD,
+    database=INFLUXDB_DATABASE)
+monitoring_client: MonitoringClient = MonitoringClient()
+dbscanserving_client: DbscanServingClient = DbscanServingClient(
+    address=INFERENCE_SERVICE_ADDRESS, port=DBSCANSERVING_GRPC_SERVICE_PORT)
+service_client: ServiceClient = ServiceClient()
+attack_mitigator_client: OpticalAttackMitigatorClient = OpticalAttackMitigatorClient(
+    address=ATTACK_MITIGATOR_SERVICE_ADDRESS, port=ATTACK_MITIGATOR_GRPC_SERVICE_PORT)
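+# Note: these module-level clients are created once at import time and shared
+# by every RPC handled by the servicer; the parameterless ones resolve their
+# endpoints from the per-service environment variables, following the same
+# pattern as ServiceClient below.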
 
 
 class OpticalCentralizedAttackDetectorServiceServicerImpl(OpticalCentralizedAttackDetectorServiceServicer):
diff --git a/src/service/Config.py b/src/service/Config.py
index 5d551b023fc21202a1411ae46781682055fdef91..70a33251242c51f49140e596b8208a19dd5245f7 100644
--- a/src/service/Config.py
+++ b/src/service/Config.py
@@ -12,22 +12,3 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-
-# General settings
-LOG_LEVEL = logging.WARNING
-
-# gRPC settings
-GRPC_SERVICE_PORT = 3030
-GRPC_MAX_WORKERS  = 10
-GRPC_GRACE_PERIOD = 60
-
-# Prometheus settings
-METRICS_PORT = 9192
-
-# Dependency micro-service connection settings
-CONTEXT_SERVICE_HOST = '127.0.0.1'
-CONTEXT_SERVICE_PORT = 1010
-
-DEVICE_SERVICE_HOST = '127.0.0.1'
-DEVICE_SERVICE_PORT = 2020
diff --git a/src/service/client/ServiceClient.py b/src/service/client/ServiceClient.py
index a44842768b9ee112288653f57706c5c4549503e9..a6335bfceedeec02cd223f4e960e9f69afe7a35e 100644
--- a/src/service/client/ServiceClient.py
+++ b/src/service/client/ServiceClient.py
@@ -13,6 +13,8 @@
 # limitations under the License.
 
 import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
 from common.tools.client.RetryDecorator import retry, delay_exponential
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from service.proto.context_pb2 import Empty, Service, ServiceId
@@ -24,9 +26,11 @@ DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
 RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
 
 class ServiceClient:
-    def __init__(self, address, port):
-        self.endpoint = '{:s}:{:s}'.format(str(address), str(port))
-        LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint))
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.SERVICE)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.SERVICE)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
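+        # With no arguments the endpoint is resolved from the environment
+        # (presumably SERVICESERVICE_SERVICE_HOST / SERVICESERVICE_SERVICE_PORT_GRPC,
+        # following the get_env_var_name() convention used elsewhere in this
+        # change); explicit host/port arguments override it, e.g.:
+        #     ServiceClient()                    # environment-driven endpoint
+        #     ServiceClient('127.0.0.1', 3030)   # explicit endpoint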
         self.channel = None
         self.stub = None
         self.connect()
diff --git a/src/service/service/ServiceService.py b/src/service/service/ServiceService.py
index 21945b7d3c2fe75e27bfa37bc7465f75e7b660f1..356c314c380e8df151b60ed826c41d294f9462ca 100644
--- a/src/service/service/ServiceService.py
+++ b/src/service/service/ServiceService.py
@@ -12,72 +12,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging
-from concurrent import futures
-from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
-from grpc_health.v1.health_pb2 import HealthCheckResponse
-from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
 from common.orm.backend.BackendEnum import BackendEnum
 from common.orm.Database import Database
 from common.orm.Factory import get_database_backend
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-from service.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
+from common.tools.service.GenericGrpcService import GenericGrpcService
 from service.proto.service_pb2_grpc import add_ServiceServiceServicer_to_server
 from .ServiceServiceServicerImpl import ServiceServiceServicerImpl
 from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
 
-BIND_ADDRESS = '0.0.0.0'
-LOGGER = logging.getLogger(__name__)
+class ServiceService(GenericGrpcService):
+    def __init__(self, service_handler_factory : ServiceHandlerFactory, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.SERVICE)
+        super().__init__(port, cls_name=cls_name)
+        database = Database(get_database_backend(backend=BackendEnum.INMEMORY))
+        self.service_servicer = ServiceServiceServicerImpl(database, service_handler_factory)
 
-class ServiceService:
-    def __init__(
-        self, context_client : ContextClient, device_client : DeviceClient,
-        service_handler_factory : ServiceHandlerFactory,
-        address=BIND_ADDRESS, port=GRPC_SERVICE_PORT, max_workers=GRPC_MAX_WORKERS,
-        grace_period=GRPC_GRACE_PERIOD):
-
-        self.context_client = context_client
-        self.device_client = device_client
-        self.service_handler_factory = service_handler_factory
-        self.address = address
-        self.port = port
-        self.endpoint = None
-        self.max_workers = max_workers
-        self.grace_period = grace_period
-        self.service_servicer = None
-        self.health_servicer = None
-        self.pool = None
-        self.server = None
-
-        self.database = Database(get_database_backend(backend=BackendEnum.INMEMORY))
-
-    def start(self):
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(self.port))
-        LOGGER.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
-            str(self.endpoint), str(self.max_workers)))
-
-        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
-        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))
-
-        self.service_servicer = ServiceServiceServicerImpl(
-            self.context_client, self.device_client, self.database, self.service_handler_factory)
+    def install_servicers(self):
         add_ServiceServiceServicer_to_server(self.service_servicer, self.server)
-
-        self.health_servicer = HealthServicer(
-            experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
-        add_HealthServicer_to_server(self.health_servicer, self.server)
-
-        port = self.server.add_insecure_port(self.endpoint)
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(port))
-        LOGGER.info('Listening on {:s}...'.format(str(self.endpoint)))
-        self.server.start()
-        self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) # pylint: disable=maybe-no-member
-
-        LOGGER.debug('Service started')
-
-    def stop(self):
-        LOGGER.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period)))
-        self.health_servicer.enter_graceful_shutdown()
-        self.server.stop(self.grace_period)
-        LOGGER.debug('Service stopped')
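
ServiceService shrinks to a constructor plus `install_servicers()` because all of the deleted boilerplate (thread pool, health servicer, port binding, graceful shutdown) moves into `common.tools.service.GenericGrpcService`. That base class is not part of this excerpt, so the following is a hedged reconstruction inferred from the deleted code above and the subclass call sites (`bind_port`, `bind_address`, `enable_health_servicer`, `self.server`):

```python
# Hypothetical reconstruction of GenericGrpcService; the real class lives in
# src/common/tools/service/ and may differ in details.
import grpc
from concurrent import futures
from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
from grpc_health.v1.health_pb2 import HealthCheckResponse
from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server

class GenericGrpcService:
    def __init__(self, bind_port, bind_address='0.0.0.0', max_workers=10, grace_period=60,
                 enable_health_servicer=True, cls_name=__name__):
        self.bind_port = bind_port
        self.bind_address = bind_address
        self.max_workers = max_workers
        self.grace_period = grace_period
        self.enable_health_servicer = enable_health_servicer
        self.endpoint = None
        self.pool = None
        self.server = None
        self.health_servicer = None

    def install_servicers(self):
        pass  # subclasses register their servicers on self.server here

    def start(self):
        self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port))
        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
        self.server = grpc.server(self.pool)
        self.install_servicers()
        if self.enable_health_servicer:
            self.health_servicer = HealthServicer(
                experimental_non_blocking=True,
                experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
            add_HealthServicer_to_server(self.health_servicer, self.server)
        port = self.server.add_insecure_port(self.endpoint)
        self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(port))
        self.server.start()
        if self.enable_health_servicer:
            self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING)

    def stop(self):
        if self.enable_health_servicer:
            self.health_servicer.enter_graceful_shutdown()
        self.server.stop(self.grace_period)
```
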
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index 7720699321daa4f1b77af60a9059e3fc9d7eec79..f34b99d6367d12f4d6d995db2f5fc1a691933bca 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -39,13 +39,10 @@ METHOD_NAMES = ['CreateService', 'UpdateService', 'DeleteService']
 METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
 
 class ServiceServiceServicerImpl(ServiceServiceServicer):
-    def __init__(
-        self, context_client : ContextClient, device_client : DeviceClient, database : Database,
-        service_handler_factory : ServiceHandlerFactory):
-
+    def __init__(self, database : Database, service_handler_factory : ServiceHandlerFactory) -> None:
         LOGGER.debug('Creating Servicer...')
-        self.context_client = context_client
-        self.device_client = device_client
+        self.context_client = ContextClient()
+        self.device_client = DeviceClient()
         self.database = database
         self.service_handler_factory = service_handler_factory
         LOGGER.debug('Servicer Created')
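
The constructor slimming is the visible half of a dependency-injection reversal: the servicer now owns its clients instead of receiving them. Combined with the env-var-defaulting clients above, only the genuinely service-local collaborators remain injected:

```python
# Before/after contrast (illustrative variable names):
servicer = ServiceServiceServicerImpl(context_client, device_client, database, factory)  # old
servicer = ServiceServiceServicerImpl(database, factory)  # new: clients self-configure
```
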
diff --git a/src/service/service/__main__.py b/src/service/service/__main__.py
index cc1b008958572020ebbe54fdfcda27ed4218f80e..1a67a309ff19bda2bf3174c80dfb908e99f72d14 100644
--- a/src/service/service/__main__.py
+++ b/src/service/service/__main__.py
@@ -14,12 +14,10 @@
 
 import logging, signal, sys, threading
 from prometheus_client import start_http_server
-from common.Settings import get_setting, wait_for_environment_variables
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-from service.Config import (
-    CONTEXT_SERVICE_HOST, CONTEXT_SERVICE_PORT, DEVICE_SERVICE_HOST, DEVICE_SERVICE_PORT, GRPC_SERVICE_PORT,
-    GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD, LOG_LEVEL, METRICS_PORT)
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+    wait_for_environment_variables)
 from .ServiceService import ServiceService
 from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
 from .service_handlers import SERVICE_HANDLERS
@@ -34,51 +32,31 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
 def main():
     global LOGGER # pylint: disable=global-statement
 
-    grpc_service_port    = get_setting('SERVICESERVICE_SERVICE_PORT_GRPC', default=GRPC_SERVICE_PORT   )
-    max_workers          = get_setting('MAX_WORKERS',                      default=GRPC_MAX_WORKERS    )
-    grace_period         = get_setting('GRACE_PERIOD',                     default=GRPC_GRACE_PERIOD   )
-    log_level            = get_setting('LOG_LEVEL',                        default=LOG_LEVEL           )
-    metrics_port         = get_setting('METRICS_PORT',                     default=METRICS_PORT        )
-
+    log_level = get_log_level()
     logging.basicConfig(level=log_level)
     LOGGER = logging.getLogger(__name__)
 
     wait_for_environment_variables([
-        'CONTEXTSERVICE_SERVICE_HOST', 'CONTEXTSERVICE_SERVICE_PORT_GRPC',
-        'DEVICESERVICE_SERVICE_HOST', 'DEVICESERVICE_SERVICE_PORT_GRPC'
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
-    context_service_host = get_setting('CONTEXTSERVICE_SERVICE_HOST',      default=CONTEXT_SERVICE_HOST)
-    context_service_port = get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC', default=CONTEXT_SERVICE_PORT)
-    device_service_host  = get_setting('DEVICESERVICE_SERVICE_HOST',       default=DEVICE_SERVICE_HOST )
-    device_service_port  = get_setting('DEVICESERVICE_SERVICE_PORT_GRPC',  default=DEVICE_SERVICE_PORT )
-
     signal.signal(signal.SIGINT,  signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
     LOGGER.info('Starting...')
 
     # Start metrics server
+    metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
-    # Initialize Context Client
-    if context_service_host is None or context_service_port is None:
-        raise Exception('Wrong address({:s}):port({:s}) of Context component'.format(
-            str(context_service_host), str(context_service_port)))
-    context_client = ContextClient(context_service_host, context_service_port)
-
-    # Initialize Device Client
-    if device_service_host is None or device_service_port is None:
-        raise Exception('Wrong address({:s}):port({:s}) of Device component'.format(
-            str(device_service_host), str(device_service_port)))
-    device_client = DeviceClient(device_service_host, device_service_port)
-
+    # Initialize ServiceHandler Factory
     service_handler_factory = ServiceHandlerFactory(SERVICE_HANDLERS)
 
     # Starting service service
-    grpc_service = ServiceService(
-        context_client, device_client, service_handler_factory, port=grpc_service_port, max_workers=max_workers,
-        grace_period=grace_period)
+    grpc_service = ServiceService(service_handler_factory)
     grpc_service.start()
 
     # Wait for Ctrl+C or termination signal
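
The literal environment-variable names are replaced by `get_env_var_name(enum, suffix)`, centralizing the naming convention. The removed literals pin down the expected output, so the mapping can be sketched, assuming the enum values are the lowercase component names and the suffix constants are as shown:

```python
# Hedged sketch of the name derivation; the exact constant values are
# assumptions, checked here against the literals this hunk deletes.
from enum import Enum

class ServiceNameEnum(Enum):
    CONTEXT = 'context'
    DEVICE  = 'device'

ENVVAR_SUFIX_SERVICE_HOST      = 'SERVICE_HOST'
ENVVAR_SUFIX_SERVICE_PORT_GRPC = 'SERVICE_PORT_GRPC'

def get_env_var_name(service_name: ServiceNameEnum, env_var_group: str) -> str:
    return ('{:s}SERVICE_{:s}'.format(service_name.value, env_var_group)).upper()

assert get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST) == 'CONTEXTSERVICE_SERVICE_HOST'
assert get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC) == 'DEVICESERVICE_SERVICE_PORT_GRPC'
```
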
diff --git a/src/service/tests/MockService_Dependencies.py b/src/service/tests/MockService_Dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..8194ba94347b6439e9d71fda1c79a94b4ddb3f9d
--- /dev/null
+++ b/src/service/tests/MockService_Dependencies.py
@@ -0,0 +1,49 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from typing import Union
+from common.Constants import ServiceNameEnum
+from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name
+from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.tests.MockServicerImpl_Device import MockServicerImpl_Device
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from context.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+from device.proto.device_pb2_grpc import add_DeviceServiceServicer_to_server
+
+LOCAL_HOST = '127.0.0.1'
+
+SERVICE_CONTEXT = ServiceNameEnum.CONTEXT
+SERVICE_DEVICE = ServiceNameEnum.DEVICE
+
+class MockService_Dependencies(GenericGrpcService):
+    # Mock Service implementing Context and Device to simplify unitary tests of Service
+
+    def __init__(self, bind_port: Union[str, int]) -> None:
+        super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')
+
+    # pylint: disable=attribute-defined-outside-init
+    def install_servicers(self):
+        self.context_servicer = MockServicerImpl_Context()
+        add_ContextServiceServicer_to_server(self.context_servicer, self.server)
+
+        self.device_servicer = MockServicerImpl_Device()
+        add_DeviceServiceServicer_to_server(self.device_servicer, self.server)
+
+    def configure_env_vars(self):
+        os.environ[get_env_var_name(SERVICE_CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
+        os.environ[get_env_var_name(SERVICE_CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
+
+        os.environ[get_env_var_name(SERVICE_DEVICE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
+        os.environ[get_env_var_name(SERVICE_DEVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
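
The mock binds one gRPC server exposing both the Context and Device servicers, and `configure_env_vars()` then points the corresponding environment variables at that server, so any parameterless client built afterwards dials the mock. Intended call order, sketched as hypothetical driver code:

```python
# Hypothetical driver code; bind_address/bind_port are known at construction,
# so the env vars can be exported before start().
mock = MockService_Dependencies(10000)
mock.configure_env_vars()  # CONTEXTSERVICE_*/DEVICESERVICE_* now point at the mock
mock.start()
# ContextClient() / DeviceClient() instances created from here on connect to 127.0.0.1:10000
mock.stop()
```
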
diff --git a/src/service/tests/PrepareTestScenario.py b/src/service/tests/PrepareTestScenario.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcf3cd156ab04e932e440837dc8ca0df645dc0cc
--- /dev/null
+++ b/src/service/tests/PrepareTestScenario.py
@@ -0,0 +1,68 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest, os
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from service.service.ServiceService import ServiceService
+from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
+from service.service.service_handlers import SERVICE_HANDLERS
+from service.tests.MockService_Dependencies import MockService_Dependencies
+
+LOCAL_HOST = '127.0.0.1'
+MOCKSERVICE_PORT = 10000
+SERVICE_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.SERVICE) # avoid privileged ports
+os.environ[get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(SERVICE_SERVICE_PORT)
+
+@pytest.fixture(scope='session')
+def mock_service():
+    _service = MockService_Dependencies(MOCKSERVICE_PORT)
+    _service.configure_env_vars()
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def context_client(mock_service : MockService_Dependencies): # pylint: disable=redefined-outer-name
+    _client = ContextClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def device_client(mock_service : MockService_Dependencies): # pylint: disable=redefined-outer-name
+    _client = DeviceClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def service_service(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient):  # pylint: disable=redefined-outer-name
+
+    _service_handler_factory = ServiceHandlerFactory(SERVICE_HANDLERS)
+    _service = ServiceService(_service_handler_factory)
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def service_client(service_service : ServiceService): # pylint: disable=redefined-outer-name
+    _client = ServiceClient()
+    yield _client
+    _client.close()
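
The fixtures form an explicit dependency chain (`mock_service` → `context_client`/`device_client` → `service_service` → `service_client`), so pytest builds the stack once per session and tears it down in reverse; note also that the module exports the Service env vars at import time, before any `ServiceClient()` can be constructed. A test only needs to request the leaf fixture:

```python
# Hypothetical test body: requesting service_client transitively spins up the
# mock dependencies and the ServiceService under test.
def test_service_stack_boots(service_client):  # pylint: disable=redefined-outer-name
    assert service_client is not None  # the whole fixture chain came up
```
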
diff --git a/src/service/tests/test_unitary.py b/src/service/tests/test_unitary.py
index 812a65c5c4353925dcf0b186a0ebb5401a4ecfa1..60fd17371771c6d5764b18255595f8b4520e8447 100644
--- a/src/service/tests/test_unitary.py
+++ b/src/service/tests/test_unitary.py
@@ -12,90 +12,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy, grpc, logging, os, pytest
-from common.tests.MockService import MockService
-from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
-from common.tests.MockServicerImpl_Device import MockServicerImpl_Device
+import copy, grpc, logging, pytest
 from common.tests.PytestGenerateTests import pytest_generate_tests # (required) pylint: disable=unused-import
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from context.proto.context_pb2 import Context, ContextId, DeviceId, Link, LinkId, Topology, Device, TopologyId
-from context.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
 from device.client.DeviceClient import DeviceClient
-from device.proto.device_pb2_grpc import add_DeviceServiceServicer_to_server
-from service.Config import (
-    GRPC_SERVICE_PORT as SERVICE_GRPC_SERVICE_PORT, GRPC_MAX_WORKERS as SERVICE_GRPC_MAX_WORKERS,
-    GRPC_GRACE_PERIOD as SERVICE_GRPC_GRACE_PERIOD)
 from service.client.ServiceClient import ServiceClient
 from service.proto.context_pb2 import Service, ServiceId
-from service.service.ServiceService import ServiceService
-from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
-from service.service.service_handlers import SERVICE_HANDLERS
+from .PrepareTestScenario import ( # pylint: disable=unused-import
+    # be careful, order of symbols is important here!
+    mock_service, service_service, context_client, device_client, service_client)
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-SERVICE_GRPC_SERVICE_PORT = 10000 + SERVICE_GRPC_SERVICE_PORT # avoid privileged ports
-
-LOCALHOST = '127.0.0.1'
-MOCKSERVER_GRPC_PORT = 10000
-
-class MockService_Combined(MockService):
-    # Mock Server implementing Context and Service to simplify unitary tests of Compute
-
-    def __init__(self, cls_name='MockService_Service'):
-        super().__init__(LOCALHOST, MOCKSERVER_GRPC_PORT, cls_name=cls_name)
-
-    # pylint: disable=attribute-defined-outside-init
-    def install_servicers(self):
-        self.context_servicer = MockServicerImpl_Context()
-        add_ContextServiceServicer_to_server(self.context_servicer, self.server)
-        self.device_servicer = MockServicerImpl_Device()
-        add_DeviceServiceServicer_to_server(self.device_servicer, self.server)
-
-os.environ['CONTEXTSERVICE_SERVICE_HOST'] = LOCALHOST
-os.environ['CONTEXTSERVICE_SERVICE_PORT_GRPC'] = str(MOCKSERVER_GRPC_PORT)
-os.environ['DEVICESERVICE_SERVICE_HOST'] = LOCALHOST
-os.environ['DEVICESERVICE_SERVICE_PORT_GRPC'] = str(MOCKSERVER_GRPC_PORT)
-
-@pytest.fixture(scope='session')
-def mockservice():
-    _service = MockService_Combined()
-    _service.start()
-    yield _service
-    _service.stop()
-
-@pytest.fixture(scope='session')
-def context_client(mockservice : MockService_Combined): # pylint: disable=redefined-outer-name
-    _client = ContextClient(address=LOCALHOST, port=MOCKSERVER_GRPC_PORT)
-    yield _client
-    _client.close()
-
-@pytest.fixture(scope='session')
-def device_client(mockservice : MockService_Combined): # pylint: disable=redefined-outer-name
-    _client = DeviceClient(address=LOCALHOST, port=MOCKSERVER_GRPC_PORT)
-    yield _client
-    _client.close()
-
-@pytest.fixture(scope='session')
-def service_service(
-    context_client : ContextClient, # pylint: disable=redefined-outer-name
-    device_client : DeviceClient):  # pylint: disable=redefined-outer-name
-
-    _service_handler_factory = ServiceHandlerFactory(SERVICE_HANDLERS)
-    _service = ServiceService(
-        context_client, device_client, _service_handler_factory,
-        port=SERVICE_GRPC_SERVICE_PORT, max_workers=SERVICE_GRPC_MAX_WORKERS, grace_period=SERVICE_GRPC_GRACE_PERIOD)
-    _service.start()
-    yield _service
-    _service.stop()
-
-@pytest.fixture(scope='session')
-def service_client(service_service : ServiceService): # pylint: disable=redefined-outer-name
-    _client = ServiceClient(address=LOCALHOST, port=SERVICE_GRPC_SERVICE_PORT)
-    yield _client
-    _client.close()
-
 try:
     from .ServiceHandlersToTest import SERVICE_HANDLERS_TO_TEST
 except ImportError:
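
test_unitary.py now gets its fixtures by importing them from PrepareTestScenario; pytest discovers fixtures by name in the test module's namespace, which is exactly why the `unused-import` pragma is required. The mechanism in miniature, with hypothetical module names:

```python
# prepare.py (hypothetical)
import pytest

@pytest.fixture(scope='session')
def shared_resource():
    yield 'ready'

# test_something.py (hypothetical): the import alone makes the fixture
# resolvable, even though the name is never referenced directly.
from prepare import shared_resource  # pylint: disable=unused-import

def test_uses_shared(shared_resource):  # pylint: disable=redefined-outer-name
    assert shared_resource == 'ready'
```
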
diff --git a/src/slice/Config.py b/src/slice/Config.py
index e6d770d000cc249d73cccf17dd17f21ccb001f7d..70a33251242c51f49140e596b8208a19dd5245f7 100644
--- a/src/slice/Config.py
+++ b/src/slice/Config.py
@@ -1,22 +1,14 @@
-import logging
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
-# General settings
-LOG_LEVEL = logging.WARNING
-
-# gRPC settings
-GRPC_SERVICE_PORT = 4040
-GRPC_MAX_WORKERS  = 10
-GRPC_GRACE_PERIOD = 60
-
-# Prometheus settings
-METRICS_PORT = 9192
-
-# Dependency micro-service connection settings
-CONTEXT_SERVICE_HOST = '127.0.0.1'
-CONTEXT_SERVICE_PORT = 1010
-
-SERVICE_SERVICE_HOST = '127.0.0.1'
-SERVICE_SERVICE_PORT = 3030
-
-INTERDOMAIN_SERVICE_HOST = '127.0.0.1'
-INTERDOMAIN_SERVICE_PORT = 10010
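
slice/Config.py is reduced to the license header: the constants it carried now have to come from the common settings helpers. Presumably there is a central defaults table keyed by component name; a hypothetical sketch consistent with the values deleted above (context 1010, service 3030, slice 4040, interdomain 10010):

```python
# Assumption: the shape and location of this table in src/common/ are guesses;
# only the port numbers are taken from the constants deleted above.
import os

DEFAULT_SERVICE_GRPC_PORTS = {
    'context'     : 1010,
    'service'     : 3030,
    'slice'       : 4040,
    'interdomain' : 10010,
}

def get_service_port_grpc(service_name: str) -> int:
    env_var_name = '{:s}SERVICE_SERVICE_PORT_GRPC'.format(service_name.upper())
    return int(os.environ.get(env_var_name, DEFAULT_SERVICE_GRPC_PORTS[service_name]))
```
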
diff --git a/src/slice/client/SliceClient.py b/src/slice/client/SliceClient.py
index 5566108f83a68cbac695a117b17b7a3fd1bd3de1..d1783e882faf2ea0e89fd3d1e034d8cba02dc24b 100644
--- a/src/slice/client/SliceClient.py
+++ b/src/slice/client/SliceClient.py
@@ -13,6 +13,8 @@
 # limitations under the License.
 
 import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
 from common.tools.client.RetryDecorator import retry, delay_exponential
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from slice.proto.context_pb2 import Empty, Slice, SliceId
@@ -24,8 +26,10 @@ DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
 RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
 
 class SliceClient:
-    def __init__(self, address, port):
-        self.endpoint = '{:s}:{:s}'.format(str(address), str(port))
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.SLICE)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.SLICE)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
         LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint))
         self.channel = None
         self.stub = None
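
The retry wiring visible in the context lines deserves a gloss: every RPC is wrapped by `RETRY_DECORATOR`, which retries up to `MAX_RETRIES` times, sleeps according to `delay_exponential(initial=0.01, increment=2.0, maximum=5.0)`, and (judging by `prepare_method_name='connect'`) re-runs `connect()` to rebuild the channel before each retry. A plausible sketch of the delay function only:

```python
# Hedged sketch; the real implementation is in common/tools/client/RetryDecorator.py.
def delay_exponential(initial=0.01, increment=2.0, maximum=5.0):
    def compute(num_try):
        # 0.01s, 0.02s, 0.04s, ... capped at 5s for num_try = 1, 2, 3, ...
        return min(initial * (increment ** (num_try - 1)), maximum)
    return compute
```
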
diff --git a/src/slice/service/SliceService.py b/src/slice/service/SliceService.py
index a7ad2694698333d0450aef9e670dd2a4fe9b30e5..7121ae4676d63977953bb3f66cb0754c8d89de88 100644
--- a/src/slice/service/SliceService.py
+++ b/src/slice/service/SliceService.py
@@ -12,65 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging
-from concurrent import futures
-from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
-from grpc_health.v1.health_pb2 import HealthCheckResponse
-from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
-from context.client.ContextClient import ContextClient
-from interdomain.client.InterdomainClient import InterdomainClient
-from service.client.ServiceClient import ServiceClient
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
 from slice.proto.slice_pb2_grpc import add_SliceServiceServicer_to_server
 from slice.service.SliceServiceServicerImpl import SliceServiceServicerImpl
-from slice.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
 
-BIND_ADDRESS = '0.0.0.0'
-LOGGER = logging.getLogger(__name__)
+class SliceService(GenericGrpcService):
+    def __init__(self, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.SLICE)
+        super().__init__(port, cls_name=cls_name)
+        self.slice_servicer = SliceServiceServicerImpl()
 
-class SliceService:
-    def __init__(
-        self, context_client : ContextClient, interdomain_client : InterdomainClient, service_client : ServiceClient,
-        address=BIND_ADDRESS, port=GRPC_SERVICE_PORT, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD):
-
-        self.context_client = context_client
-        self.interdomain_client = interdomain_client
-        self.service_client = service_client
-        self.address = address
-        self.port = port
-        self.endpoint = None
-        self.max_workers = max_workers
-        self.grace_period = grace_period
-        self.slice_servicer = None
-        self.health_servicer = None
-        self.pool = None
-        self.server = None
-
-    def start(self):
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(self.port))
-        LOGGER.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
-            str(self.endpoint), str(self.max_workers)))
-
-        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
-        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))
-
-        self.slice_servicer = SliceServiceServicerImpl(
-            self.context_client, self.interdomain_client, self.service_client)
+    def install_servicers(self):
         add_SliceServiceServicer_to_server(self.slice_servicer, self.server)
-
-        self.health_servicer = HealthServicer(
-            experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
-        add_HealthServicer_to_server(self.health_servicer, self.server)
-
-        port = self.server.add_insecure_port(self.endpoint)
-        self.endpoint = '{:s}:{:s}'.format(str(self.address), str(port))
-        LOGGER.info('Listening on {:s}...'.format(str(self.endpoint)))
-        self.server.start()
-        self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) # pylint: disable=maybe-no-member
-
-        LOGGER.debug('Service started')
-
-    def stop(self):
-        LOGGER.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period)))
-        self.health_servicer.enter_graceful_shutdown()
-        self.server.stop(self.grace_period)
-        LOGGER.debug('Service stopped')
diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py
index bd26d19435092bd8ebea5748d49488975d4d675b..eae45240066cb4f88fd095ac3966daa5a270d5f4 100644
--- a/src/slice/service/SliceServiceServicerImpl.py
+++ b/src/slice/service/SliceServiceServicerImpl.py
@@ -28,17 +28,14 @@ METHOD_NAMES = ['CreateSlice', 'UpdateSlice', 'DeleteSlice']
 METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
 
 class SliceServiceServicerImpl(SliceServiceServicer):
-    def __init__(
-        self, context_client : ContextClient, interdomain_client : InterdomainClient, service_client : ServiceClient
-    ):
+    def __init__(self):
         LOGGER.debug('Creating Servicer...')
-        self.context_client = context_client
-        self.interdomain_client = interdomain_client
-        self.service_client = service_client
         LOGGER.debug('Servicer Created')
 
     def create_update(self, request : Slice) -> SliceId:
-        slice_id = self.context_client.SetSlice(request)
+        context_client = ContextClient()
+
+        slice_id = context_client.SetSlice(request)
         if len(request.slice_endpoint_ids) != 2: return slice_id
 
         domains = set()
@@ -48,7 +45,8 @@ class SliceServiceServicerImpl(SliceServiceServicer):
 
         is_multi_domain = len(domains) == 2
         if is_multi_domain:
-            slice_id = self.interdomain_client.RequestSlice(request)
+            interdomain_client = InterdomainClient()
+            slice_id = interdomain_client.RequestSlice(request)
         else:
             # pylint: disable=no-member
             service_request = Service()
@@ -57,7 +55,8 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM
             service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
 
-            service_reply = self.service_client.CreateService(service_request)
+            service_client = ServiceClient()
+            service_reply = service_client.CreateService(service_request)
             if service_reply != service_request.service_id: # pylint: disable=no-member
                 raise Exception('Service creation failed. Wrong Service Id was returned')
 
@@ -84,7 +83,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
                      'address_ip': '0.0.0.0', 'address_prefix': 0},
                     sort_keys=True)
 
-            service_reply = self.service_client.UpdateService(service_request)
+            service_reply = service_client.UpdateService(service_request)
             if service_reply != service_request.service_id: # pylint: disable=no-member
                 raise Exception('Service update failed. Wrong Service Id was returned')
 
@@ -92,29 +91,29 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             reply.CopyFrom(request)
             slice_service_id = reply.slice_service_ids.add()
             slice_service_id.CopyFrom(service_reply)
-            self.context_client.SetSlice(reply)
+            context_client.SetSlice(reply)
             slice_id = reply.slice_id
 
-        slice_ = self.context_client.GetSlice(slice_id)
+        slice_ = context_client.GetSlice(slice_id)
         slice_active = Slice()
         slice_active.CopyFrom(slice_)
         slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE
-        self.context_client.SetSlice(slice_active)
+        context_client.SetSlice(slice_active)
         return slice_id
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def CreateSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
         #try:
-        #    slice_ = self.context_client.GetSlice(request.slice_id)
+        #    slice_ = context_client.GetSlice(request.slice_id)
         #    slice_id = slice_.slice_id
         #except grpc.RpcError:
-        #    slice_id = self.context_client.SetSlice(request)
+        #    slice_id = context_client.SetSlice(request)
         #return slice_id
         return self.create_update(request)
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def UpdateSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
-        #slice_id = self.context_client.SetSlice(request)
+        #slice_id = context_client.SetSlice(request)
         #if len(request.slice_endpoint_ids) != 2: return slice_id
         #
         #domains = set()
@@ -124,7 +123,8 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         #
         #is_multi_domain = len(domains) == 2
         #if is_multi_domain:
-        #    return self.interdomain_client.LookUpSlice(request)
+        #    interdomain_client = InterdomainClient()
+        #    return interdomain_client.LookUpSlice(request)
         #else:
         #    raise NotImplementedError('Slice should create local services for single domain slice')
         return self.create_update(request)
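
Note the trade-off this file makes: clients are now constructed inside each RPC rather than held for the servicer's lifetime. That keeps the constructor dependency-free and lets tests redirect endpoints via env vars right up to call time, at the cost of a channel setup per request. If that cost ever shows up in profiles, a cached lazy property is a small follow-up change (not what this diff does):

```python
# Hypothetical caching variant; ContextClient is the real class, the mixin
# itself is illustrative.
from context.client.ContextClient import ContextClient

class LazyContextClientMixin:
    _context_client = None

    @property
    def context_client(self) -> ContextClient:
        if self._context_client is None:
            self._context_client = ContextClient()  # endpoint from env-derived settings
        return self._context_client
```
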
diff --git a/src/slice/service/__main__.py b/src/slice/service/__main__.py
index 76bb5fa34eac45c828413d4671e023958a886d1b..f77d86bffe9b722f414be4f85adcaf0ef2cc4a8e 100644
--- a/src/slice/service/__main__.py
+++ b/src/slice/service/__main__.py
@@ -14,14 +14,10 @@
 
 import logging, signal, sys, threading
 from prometheus_client import start_http_server
-from common.Settings import get_setting, wait_for_environment_variables
-from context.client.ContextClient import ContextClient
-from interdomain.client.InterdomainClient import InterdomainClient
-from service.client.ServiceClient import ServiceClient
-from slice.Config import (
-    CONTEXT_SERVICE_HOST, CONTEXT_SERVICE_PORT, GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD,
-    INTERDOMAIN_SERVICE_HOST, INTERDOMAIN_SERVICE_PORT, LOG_LEVEL, METRICS_PORT, SERVICE_SERVICE_HOST,
-    SERVICE_SERVICE_PORT)
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+    wait_for_environment_variables)
 from .SliceService import SliceService
 
 terminate = threading.Event()
@@ -34,58 +28,28 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
 def main():
     global LOGGER # pylint: disable=global-statement
 
-    grpc_service_port        = get_setting('SLICESERVICE_SERVICE_PORT_GRPC',       default=GRPC_SERVICE_PORT       )
-    max_workers              = get_setting('MAX_WORKERS',                          default=GRPC_MAX_WORKERS        )
-    grace_period             = get_setting('GRACE_PERIOD',                         default=GRPC_GRACE_PERIOD       )
-    log_level                = get_setting('LOG_LEVEL',                            default=LOG_LEVEL               )
-    metrics_port             = get_setting('METRICS_PORT',                         default=METRICS_PORT            )
-
+    log_level = get_log_level()
     logging.basicConfig(level=log_level)
     LOGGER = logging.getLogger(__name__)
 
     wait_for_environment_variables([
-        'CONTEXTSERVICE_SERVICE_HOST', 'CONTEXTSERVICE_SERVICE_PORT_GRPC',
-        'INTERDOMAINSERVICE_SERVICE_HOST', 'INTERDOMAINSERVICE_SERVICE_PORT_GRPC',
-        'SERVICESERVICE_SERVICE_HOST', 'SERVICESERVICE_SERVICE_PORT_GRPC',
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
-    context_service_host     = get_setting('CONTEXTSERVICE_SERVICE_HOST',          default=CONTEXT_SERVICE_HOST    )
-    context_service_port     = get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC',     default=CONTEXT_SERVICE_PORT    )
-    interdomain_service_host = get_setting('INTERDOMAINSERVICE_SERVICE_HOST',      default=INTERDOMAIN_SERVICE_HOST)
-    interdomain_service_port = get_setting('INTERDOMAINSERVICE_SERVICE_PORT_GRPC', default=INTERDOMAIN_SERVICE_PORT)
-    service_service_host     = get_setting('SERVICESERVICE_SERVICE_HOST',          default=SERVICE_SERVICE_HOST    )
-    service_service_port     = get_setting('SERVICESERVICE_SERVICE_PORT_GRPC',     default=SERVICE_SERVICE_PORT    )
-
     signal.signal(signal.SIGINT,  signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
     LOGGER.info('Starting...')
 
     # Start metrics server
+    metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
-    # Initialize Context Client
-    if context_service_host is None or context_service_port is None:
-        raise Exception('Wrong address({:s}):port({:s}) of Context component'.format(
-            str(context_service_host), str(context_service_port)))
-    context_client = ContextClient(context_service_host, context_service_port)
-
-    # Initialize Interdomain Client
-    if interdomain_service_host is None or interdomain_service_port is None:
-        raise Exception('Wrong address({:s}):port({:s}) of Interdomain component'.format(
-            str(interdomain_service_host), str(interdomain_service_port)))
-    interdomain_client = InterdomainClient(interdomain_service_host, interdomain_service_port)
-
-    # Initialize Service Client
-    if service_service_host is None or service_service_port is None:
-        raise Exception('Wrong address({:s}):port({:s}) of Service component'.format(
-            str(service_service_host), str(service_service_port)))
-    service_client = ServiceClient(service_service_host, service_service_port)
-
     # Starting slice service
-    grpc_service = SliceService(
-        context_client, interdomain_client, service_client, port=grpc_service_port, max_workers=max_workers,
-        grace_period=grace_period)
+    grpc_service = SliceService()
     grpc_service.start()
 
     # Wait for Ctrl+C or termination signal
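
Both refactored entry points keep the same shutdown skeleton: a module-level `threading.Event` set by the SIGINT/SIGTERM handler, with the main thread parked on it between `start()` and `stop()`. The wait loop itself falls outside this hunk, so the sketch below is inferred from the surrounding context lines:

```python
# Generic sketch of the entry-point lifecycle used by these __main__ modules.
import signal, threading

terminate = threading.Event()

def signal_handler(signal, frame):  # pylint: disable=redefined-outer-name
    terminate.set()

def serve(grpc_service):
    signal.signal(signal.SIGINT,  signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    grpc_service.start()
    while not terminate.wait(timeout=1.0):
        pass  # parked until a termination signal fires
    grpc_service.stop()
```
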
diff --git a/src/slice/tests/test_unitary.py b/src/slice/tests/test_unitary.py
index fb58b51536793ab6029da1fc3c1444591d6f2bdd..1fd5a75b4dc1bb0192c8eeda6a0d452c0a9465dd 100644
--- a/src/slice/tests/test_unitary.py
+++ b/src/slice/tests/test_unitary.py
@@ -12,34 +12,32 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy, grpc, logging, pytest
-from common.database.Factory import get_database, DatabaseEngineEnum
+import logging, os, pytest
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
 from slice.client.SliceClient import SliceClient
-from slice.proto.slice_pb2 import TransportSlice
 from slice.service.SliceService import SliceService
-from slice.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
 
-port = 10000 + GRPC_SERVICE_PORT # avoid privileged ports
+LOCAL_HOST = '127.0.0.1'
+MOCKSERVICE_PORT = 10000
+SLICE_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.SLICE) # avoid privileged ports
+os.environ[get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(SLICE_SERVICE_PORT)
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
 @pytest.fixture(scope='session')
-def slice_database():
-    _database = get_database(engine=DatabaseEngineEnum.INMEMORY)
-    return _database
-
-@pytest.fixture(scope='session')
-def slice_service(slice_database):
-    _service = SliceService(
-        slice_database, port=port, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD)
+def slice_service():
+    _service = SliceService()
     _service.start()
     yield _service
     _service.stop()
 
 @pytest.fixture(scope='session')
-def slice_client(slice_service):
-    _client = SliceClient(address='127.0.0.1', port=port)
+def slice_client(slice_service : SliceService): # pylint: disable=redefined-outer-name
+    _client = SliceClient()
     yield _client
     _client.close()