diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index 414de523d10f7d1edb99799e1f5889b340d8ad04..5d87adf60bbb3303e9abe9cc17c4a68cbe295370 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -70,7 +70,7 @@ TMP_FOLDER="./tmp"
 CRDB_MANIFESTS_PATH="manifests/cockroachdb"
 
 # Create a tmp folder for files modified during the deployment
-TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${CRDB_NAMESPACE}/manifests"
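+# Including the namespace in the path prevents concurrent deployments (e.g., per-domain instances) from overwriting each other's manifests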
 mkdir -p $TMP_MANIFESTS_FOLDER
 
 function crdb_deploy_single() {
diff --git a/deploy/qdb.sh b/deploy/qdb.sh
index cba8a5c00fa349a032070c81e11cf99b1209cfd8..e930b5a6cfdba8897ec138e97e3115da8917554f 100755
--- a/deploy/qdb.sh
+++ b/deploy/qdb.sh
@@ -64,12 +64,14 @@ TMP_FOLDER="./tmp"
 QDB_MANIFESTS_PATH="manifests/questdb"
 
 # Create a tmp folder for files modified during the deployment
-TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${QDB_NAMESPACE}/manifests"
+mkdir -p $TMP_MANIFESTS_FOLDER
 
 function qdb_deploy() {
     echo "QuestDB Namespace"
     echo ">>> Create QuestDB Namespace (if missing)"
     kubectl create namespace ${QDB_NAMESPACE}
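+    # Brief pause to let the namespace creation settle before resources are deployed into it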
+    sleep 2
     echo
 
     echo "QuestDB"
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 019fcfa9ea8095207fd26f6d96e4e33626534357..a778d9b1f3e7948baca02d1da7c396cf723d6438 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -124,14 +124,15 @@ GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
 TMP_FOLDER="./tmp"
 
 # Create a tmp folder for files modified during the deployment
-TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${TFS_K8S_NAMESPACE}/manifests"
 mkdir -p $TMP_MANIFESTS_FOLDER
-TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+TMP_LOGS_FOLDER="${TMP_FOLDER}/${TFS_K8S_NAMESPACE}/logs"
 mkdir -p $TMP_LOGS_FOLDER
 
 echo "Deleting and Creating a new namespace..."
 kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found
 kubectl create namespace $TFS_K8S_NAMESPACE
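+# Brief pause to let the API server settle after the namespace delete/create cycle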
+sleep 2
 printf "\n"
 
 echo "Create secret with CockroachDB data"
diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
index f9a6d987d18bb3d994538c85b2ec14024553b45b..22c0f5f9d124b76d0e477dce35d14811204c1496 100644
--- a/manifests/deviceservice.yaml
+++ b/manifests/deviceservice.yaml
@@ -23,6 +23,9 @@ spec:
   replicas: 1
   template:
     metadata:
+      annotations:
+        # Required for the IETF L2VPN SBI when both parent and child controllers run in the same K8s cluster with Linkerd
+        config.linkerd.io/skip-outbound-ports: "8002"
       labels:
         app: deviceservice
     spec:
diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml
index 79acf96def508837fd0b3def816d3e4c4e20a368..067f9432749f7d2c986503b034e55a13c8f2b210 100644
--- a/manifests/interdomainservice.yaml
+++ b/manifests/interdomainservice.yaml
@@ -44,11 +44,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:10010"]
         resources:
           requests:
-            cpu: 50m
+            cpu: 250m
             memory: 64Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 1000m
+            memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml
index 5db05d4af0e3594edd6186a5adbcb6733ed7d5c8..80caefa935d00d49cb311e586d2a6cc5206b8d02 100644
--- a/manifests/nginx_ingress_http.yaml
+++ b/manifests/nginx_ingress_http.yaml
@@ -36,13 +36,6 @@ spec:
               name: webuiservice
               port:
                 number: 3000
-        #- path: /context(/|$)(.*)
-        #  pathType: Prefix
-        #  backend:
-        #    service:
-        #      name: contextservice
-        #      port:
-        #        number: 8080
         - path: /()(restconf/.*)
           pathType: Prefix
           backend:
diff --git a/src/common/Settings.py b/src/common/Settings.py
index ea161e55590a54f1defc53c3c833f5a4302af972..1efe80db72cc47ba26a32241cc0bf9c15e866176 100644
--- a/src/common/Settings.py
+++ b/src/common/Settings.py
@@ -37,16 +37,25 @@ ENVVAR_SUFIX_SERVICE_HOST         = 'SERVICE_HOST'
 ENVVAR_SUFIX_SERVICE_PORT_GRPC    = 'SERVICE_PORT_GRPC'
 ENVVAR_SUFIX_SERVICE_PORT_HTTP    = 'SERVICE_PORT_HTTP'
 
+def find_missing_environment_variables(
+    required_environment_variables : List[str] = []
+) -> List[str]:
+    if ENVVAR_KUBERNETES_PORT in os.environ:
+        missing_variables = set(required_environment_variables).difference(set(os.environ.keys()))
+    else:
+        # We're not running in Kubernetes; treat all required variables as missing
+        missing_variables = required_environment_variables
+    return missing_variables
+
 def wait_for_environment_variables(
     required_environment_variables : List[str] = [], wait_delay_seconds : float = DEFAULT_RESTART_DELAY
 ):
-    if ENVVAR_KUBERNETES_PORT not in os.environ: return # We're not running in Kubernetes, nothing to wait for
-    missing_variables = set(required_environment_variables).difference(set(os.environ.keys()))
+    missing_variables = find_missing_environment_variables(required_environment_variables)
     if len(missing_variables) == 0: return # We have all environment variables defined
     msg = 'Variables({:s}) are missing in Environment({:s}), restarting in {:f} seconds...'
     LOGGER.error(msg.format(str(missing_variables), str(os.environ), wait_delay_seconds))
     time.sleep(wait_delay_seconds)
-    raise Exception('Restarting...')
+    raise Exception('Restarting...') # pylint: disable=broad-exception-raised
 
 def get_setting(name, **kwargs):
     value = os.environ.get(name)
@@ -54,6 +63,7 @@ def get_setting(name, **kwargs):
         value = kwargs['settings'].pop(name, value)
     if value is not None: return value
     if 'default' in kwargs: return kwargs['default']
+    # pylint: disable=broad-exception-raised
     raise Exception('Setting({:s}) not specified in environment or configuration'.format(str(name)))
 
 def get_env_var_name(service_name : ServiceNameEnum, env_var_group):
diff --git a/src/common/message_broker/backend/nats/NatsBackend.py b/src/common/message_broker/backend/nats/NatsBackend.py
index 35de3acb3043f5b0a7a08cb9a441bffe8af3462e..bcbf2a721e5d41ef122f239ee5a536eb575edcbe 100644
--- a/src/common/message_broker/backend/nats/NatsBackend.py
+++ b/src/common/message_broker/backend/nats/NatsBackend.py
@@ -19,8 +19,8 @@ from common.message_broker.Message import Message
 from .._Backend import _Backend
 from .NatsBackendThread import NatsBackendThread
 
-NATS_URI_TEMPLATE_AUTH = 'nats://{:s}:{:s}@nats.{:s}.svc.cluster.local:{:s}'
-NATS_URI_TEMPLATE_NOAUTH = 'nats://nats.{:s}.svc.cluster.local:{:s}'
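+# URI format: nats://[<user>:<password>@]<service>.<namespace>.svc.cluster.local:<port>.
+# Both placeholders below are filled with the NATS namespace, i.e., the NATS service is assumed to be named after its namespace.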
+NATS_URI_TEMPLATE_AUTH = 'nats://{:s}:{:s}@{:s}.{:s}.svc.cluster.local:{:s}'
+NATS_URI_TEMPLATE_NOAUTH = 'nats://{:s}.{:s}.svc.cluster.local:{:s}'
 
 class NatsBackend(_Backend):
     def __init__(self, **settings) -> None: # pylint: disable=super-init-not-called
@@ -32,10 +32,10 @@ class NatsBackend(_Backend):
             nats_password    = get_setting('NATS_PASSWORD', settings=settings, default=None)
             if nats_username is None or nats_password is None:
                 nats_uri = NATS_URI_TEMPLATE_NOAUTH.format(
-                    nats_namespace, nats_client_port)
+                    nats_namespace, nats_namespace, nats_client_port)
             else:
                 nats_uri = NATS_URI_TEMPLATE_AUTH.format(
-                    nats_username, nats_password, nats_namespace, nats_client_port)
+                    nats_username, nats_password, nats_namespace, nats_namespace, nats_client_port)
 
         self._terminate = threading.Event()
         self._nats_backend_thread = NatsBackendThread(nats_uri)
diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py
index 1e238510c98b83bebde8167711b988d7476e5a99..916a73d300011c62fc008fc7437df8a71f6a9838 100644
--- a/src/common/tools/descriptor/Loader.py
+++ b/src/common/tools/descriptor/Loader.py
@@ -85,6 +85,7 @@ class DescriptorLoader:
         service_client : Optional[ServiceClient] = None, slice_client : Optional[SliceClient] = None
     ) -> None:
         if (descriptors is None) == (descriptors_file is None):
+            # pylint: disable=broad-exception-raised
             raise Exception('Exactly one of "descriptors" or "descriptors_file" is required')
         
         if descriptors_file is not None:
@@ -238,8 +239,13 @@ class DescriptorLoader:
         self._process_descr('service',    'add',    self.__ctx_cli.SetService,    Service,    self.__services      )
         self._process_descr('slice',      'add',    self.__ctx_cli.SetSlice,      Slice,      self.__slices        )
         self._process_descr('connection', 'add',    self.__ctx_cli.SetConnection, Connection, self.__connections   )
-        self._process_descr('context',    'update', self.__ctx_cli.SetContext,    Context,    self.__contexts      )
-        self._process_descr('topology',   'update', self.__ctx_cli.SetTopology,   Topology,   self.__topologies    )
+
+        # Updating the context and topology here is unnecessary:
+        # - devices and links are assigned to topologies automatically by the Context component
+        # - topologies, services, and slices are assigned to contexts automatically by the Context component
+        #self._process_descr('context',    'update', self.__ctx_cli.SetContext,    Context,    self.__contexts      )
+        #self._process_descr('topology',   'update', self.__ctx_cli.SetTopology,   Topology,   self.__topologies    )
+
         #self.__ctx_cli.close()
 
     def _load_normal_mode(self) -> None:
@@ -265,8 +271,12 @@ class DescriptorLoader:
         self._process_descr('service',  'update', self.__svc_cli.UpdateService,   Service,  self.__services      )
         self._process_descr('slice',    'add',    self.__slc_cli.CreateSlice,     Slice,    self.__slices_add    )
         self._process_descr('slice',    'update', self.__slc_cli.UpdateSlice,     Slice,    self.__slices        )
-        self._process_descr('context',  'update', self.__ctx_cli.SetContext,      Context,  self.__contexts      )
-        self._process_descr('topology', 'update', self.__ctx_cli.SetTopology,     Topology, self.__topologies    )
+
+        # Updating the context and topology here is unnecessary:
+        # - devices and links are assigned to topologies automatically by the Context component
+        # - topologies, services, and slices are assigned to contexts automatically by the Context component
+        #self._process_descr('context',  'update', self.__ctx_cli.SetContext,      Context,  self.__contexts      )
+        #self._process_descr('topology', 'update', self.__ctx_cli.SetTopology,     Topology, self.__topologies    )
 
         #self.__slc_cli.close()
         #self.__svc_cli.close()
diff --git a/src/common/type_checkers/Checkers.py b/src/common/type_checkers/Checkers.py
index 085ba572c39165c8ae11e2a546b9439a4e93962b..e61bd3ccd338b922598a2b7b37af56b22cc11267 100644
--- a/src/common/type_checkers/Checkers.py
+++ b/src/common/type_checkers/Checkers.py
@@ -30,7 +30,7 @@ def chk_attribute(name : str, container : Dict, container_name : str, **kwargs):
     if 'default' in kwargs: return kwargs['default']
     raise AttributeError('Missing object({:s}) in container({:s})'.format(str(name), str(container_name)))
 
-def chk_type(name : str, value : Any, type_or_types : Union[type, Set[type]] = set()) -> Any:
+def chk_type(name : str, value : Any, type_or_types : Union[type, Set[type], Tuple[type]] = set()) -> Any:
     if isinstance(value, type_or_types): return value
     msg = '{}({}) is of a wrong type({}). Accepted type_or_types({}).'
     raise TypeError(msg.format(str(name), str(value), type(value).__name__, str(type_or_types)))
diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py
index f5bfc9dea5fb81fa8becfedc8ce1e4e0f59e7292..76db07a9e30b4f62c4b51574ad95c222a1490f79 100644
--- a/src/context/service/database/Link.py
+++ b/src/context/service/database/Link.py
@@ -18,9 +18,10 @@ from sqlalchemy.engine import Engine
 from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
-from common.proto.context_pb2 import Link, LinkId
+from common.proto.context_pb2 import Link, LinkId, TopologyId
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.tools.object_factory.Link import json_link_id
+from context.service.database.uuids.Topology import topology_get_uuid
 from .models.LinkModel import LinkModel, LinkEndPointModel
 from .models.TopologyModel import TopologyLinkModel
 from .uuids.EndPoint import endpoint_get_uuid
@@ -67,6 +68,15 @@ def link_set(db_engine : Engine, request : Link) -> Tuple[Dict, bool]:
 
     topology_uuids : Set[str] = set()
     related_topologies : List[Dict] = list()
+
+    # By default, always add link to default Context/Topology
+    _,topology_uuid = topology_get_uuid(TopologyId(), allow_random=False, allow_default=True)
+    related_topologies.append({
+        'topology_uuid': topology_uuid,
+        'link_uuid'    : link_uuid,
+    })
+    topology_uuids.add(topology_uuid)
+
     link_endpoints_data : List[Dict] = list()
     for i,endpoint_id in enumerate(request.link_endpoint_ids):
         endpoint_topology_uuid, _, endpoint_uuid = endpoint_get_uuid(
diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py
index 6a62a75e71f0e02adb7fb1b70e4568b382494980..8717254cb59ad1b83a6e65ca3c1ba68757663674 100644
--- a/src/device/service/Tools.py
+++ b/src/device/service/Tools.py
@@ -174,7 +174,6 @@ def populate_endpoints(
         elif resource_key.startswith('/endpoints/endpoint'):
             endpoint_uuid = resource_value['uuid']
             _device_uuid = resource_value.get('device_uuid')
-            endpoint_name = resource_value.get('name')
 
             if _device_uuid is None:
                 # add endpoint to current device
@@ -185,11 +184,17 @@ def populate_endpoints(
                 device_endpoint = new_sub_devices[_device_uuid].device_endpoints.add()
                 device_endpoint.endpoint_id.device_id.device_uuid.uuid = _device_uuid
 
-            device_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
-            device_endpoint.endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME
-            
             device_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid
+
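+            # Endpoints may specify the context/topology they belong to; otherwise, the default context/topology is used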
+            endpoint_context_uuid = resource_value.get('context_uuid', DEFAULT_CONTEXT_NAME)
+            device_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = endpoint_context_uuid
+
+            endpoint_topology_uuid = resource_value.get('topology_uuid', DEFAULT_TOPOLOGY_NAME)
+            device_endpoint.endpoint_id.topology_id.topology_uuid.uuid = endpoint_topology_uuid
+
+            endpoint_name = resource_value.get('name')
             if endpoint_name is not None: device_endpoint.name = endpoint_name
+
             device_endpoint.endpoint_type = resource_value.get('type', '-')
 
             sample_types : Dict[int, str] = resource_value.get('sample_types', {})
diff --git a/src/device/service/drivers/emulated/Tools.py b/src/device/service/drivers/emulated/Tools.py
index 4770cc6e6d2b2ccbf86d1e3764e62f03b48837e2..0ac92bf56d5538a5ed4d3e7c53bc480d5ecd40bd 100644
--- a/src/device/service/drivers/emulated/Tools.py
+++ b/src/device/service/drivers/emulated/Tools.py
@@ -13,34 +13,74 @@
 # limitations under the License.
 
 import logging
-from typing import Any, Dict, Tuple
+from typing import Any, Dict, Optional, Tuple
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.type_checkers.Checkers import chk_attribute, chk_string, chk_type
 from device.service.driver_api._Driver import RESOURCE_ENDPOINTS
 from .Constants import SPECIAL_RESOURCE_MAPPINGS
 
 LOGGER = logging.getLogger(__name__)
 
-def compose_resource_endpoint(endpoint_data : Dict[str, Any]) -> Tuple[str, Any]:
-    endpoint_uuid = endpoint_data.get('uuid')
-    if endpoint_uuid is None: return None
-    endpoint_resource_path = SPECIAL_RESOURCE_MAPPINGS.get(RESOURCE_ENDPOINTS)
-    endpoint_resource_key = '{:s}/endpoint[{:s}]'.format(endpoint_resource_path, endpoint_uuid)
-
-    endpoint_type = endpoint_data.get('type')
-    if endpoint_type is None: return None
-
-    endpoint_sample_types = endpoint_data.get('sample_types')
-    if endpoint_sample_types is None: return None
-
-    sample_types = {}
-    for endpoint_sample_type in endpoint_sample_types:
-        try:
-            metric_name = KpiSampleType.Name(endpoint_sample_type).lower().replace('kpisampletype_', '')
-        except: # pylint: disable=bare-except
-            LOGGER.warning('Unsupported EndPointSampleType({:s})'.format(str(endpoint_sample_type)))
-            continue
-        monitoring_resource_key = '{:s}/state/{:s}'.format(endpoint_resource_key, metric_name)
-        sample_types[endpoint_sample_type] = monitoring_resource_key
-
-    endpoint_resource_value = {'uuid': endpoint_uuid, 'type': endpoint_type, 'sample_types': sample_types}
-    return endpoint_resource_key, endpoint_resource_value
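+# Copy an optional string field from endpoint_data into the endpoint resource value, if present and non-empty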
+def process_optional_string_field(
+    endpoint_data : Dict[str, Any], field_name : str, endpoint_resource_value : Dict[str, Any]
+) -> None:
+    field_value = chk_attribute(field_name, endpoint_data, 'endpoint_data', default=None)
+    if field_value is None: return
+    chk_string('endpoint_data.{:s}'.format(field_name), field_value)
+    if len(field_value) > 0: endpoint_resource_value[field_name] = field_value
+
+def compose_resource_endpoint(endpoint_data : Dict[str, Any]) -> Optional[Tuple[str, Dict]]:
+    try:
+        # Check type of endpoint_data
+        chk_type('endpoint_data', endpoint_data, dict)
+
+        # Check endpoint UUID (mandatory)
+        endpoint_uuid = chk_attribute('uuid', endpoint_data, 'endpoint_data')
+        chk_string('endpoint_data.uuid', endpoint_uuid, min_length=1)
+        endpoint_resource_path = SPECIAL_RESOURCE_MAPPINGS.get(RESOURCE_ENDPOINTS)
+        endpoint_resource_key = '{:s}/endpoint[{:s}]'.format(endpoint_resource_path, endpoint_uuid)
+        endpoint_resource_value = {'uuid': endpoint_uuid}
+
+        # Check endpoint optional string fields
+        process_optional_string_field(endpoint_data, 'name', endpoint_resource_value)
+        process_optional_string_field(endpoint_data, 'type', endpoint_resource_value)
+        process_optional_string_field(endpoint_data, 'context_uuid', endpoint_resource_value)
+        process_optional_string_field(endpoint_data, 'topology_uuid', endpoint_resource_value)
+
+        # Check endpoint sample types (optional)
+        endpoint_sample_types = chk_attribute('sample_types', endpoint_data, 'endpoint_data', default=[])
+        chk_type('endpoint_data.sample_types', endpoint_sample_types, list)
+        sample_types = {}
+        sample_type_errors = []
+        for i,endpoint_sample_type in enumerate(endpoint_sample_types):
+            field_name = 'endpoint_data.sample_types[{:d}]'.format(i)
+            try:
+                chk_type(field_name, endpoint_sample_type, (int, str))
+                if isinstance(endpoint_sample_type, int):
+                    metric_name = KpiSampleType.Name(endpoint_sample_type)
+                    metric_id = endpoint_sample_type
+                elif isinstance(endpoint_sample_type, str):
+                    metric_id = KpiSampleType.Value(endpoint_sample_type)
+                    metric_name = endpoint_sample_type
+                else:
+                    str_type = str(type(endpoint_sample_type))
+                    raise Exception('Bad format: {:s}'.format(str_type)) # pylint: disable=broad-exception-raised
+            except Exception as e: # pylint: disable=broad-exception-caught
+                MSG = 'Unsupported {:s}({:s}) : {:s}'
+                sample_type_errors.append(MSG.format(field_name, str(endpoint_sample_type), str(e)))
+                continue # skip this sample type; metric_name/metric_id are undefined or stale after a parsing error
+
+            metric_name = metric_name.lower().replace('kpisampletype_', '')
+            monitoring_resource_key = '{:s}/state/{:s}'.format(endpoint_resource_key, metric_name)
+            sample_types[metric_id] = monitoring_resource_key
+
+        if len(sample_type_errors) > 0:
+            # pylint: disable=broad-exception-raised
+            raise Exception('Malformed Sample Types:\n{:s}'.format('\n'.join(sample_type_errors)))
+
+        if len(sample_types) > 0:
+            endpoint_resource_value['sample_types'] = sample_types
+
+        return endpoint_resource_key, endpoint_resource_value
+    except: # pylint: disable=bare-except
+        LOGGER.exception('Problem composing endpoint({:s})'.format(str(endpoint_data)))
+        return None
diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py
index b72fc1b3122c3d04bde5394ae33d973fa33fa3b8..51c8ee39aa0fc70aa96fe8154cbc312043d2c488 100644
--- a/src/interdomain/service/InterdomainServiceServicerImpl.py
+++ b/src/interdomain/service/InterdomainServiceServicerImpl.py
@@ -13,7 +13,8 @@
 # limitations under the License.
 
 import grpc, logging, uuid
-from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, ServiceNameEnum
+from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, find_missing_environment_variables, get_env_var_name
 from common.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatusEnum, TeraFlowController, TopologyId
 from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
@@ -35,8 +36,6 @@ LOGGER = logging.getLogger(__name__)
 
 METRICS_POOL = MetricsPool('Interdomain', 'RPC')
 
-USE_DLT = True
-
 class InterdomainServiceServicerImpl(InterdomainServiceServicer):
     def __init__(self, remote_domain_clients : RemoteDomainClients):
         LOGGER.debug('Creating Servicer...')
@@ -48,7 +47,6 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
         context_client = ContextClient()
         pathcomp_client = PathCompClient()
         slice_client = SliceClient()
-        dlt_connector_client = DltConnectorClient()
 
         local_device_uuids = get_local_device_uuids(context_client)
         slice_owner_uuid = request.slice_owner.owner_uuid.uuid
@@ -87,6 +85,17 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
         reply = Slice()
         reply.CopyFrom(request)
 
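+        # Check whether the DLT connector component is deployed (via its env vars); if not, skip DLT recording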
+        missing_env_vars = find_missing_environment_variables([
+            get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_HOST     ),
+            get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        ])
+        if len(missing_env_vars) == 0:
+            # DLT available
+            dlt_connector_client = DltConnectorClient()
+            dlt_connector_client.connect()
+        else:
+            dlt_connector_client = None
+
         dlt_record_sender = DltRecordSender(context_client, dlt_connector_client)
 
         for domain_uuid, is_local_domain, endpoint_ids in traversed_domains:
@@ -119,7 +128,7 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
                 LOGGER.info('[loop] [remote] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice)))
                 sub_slice_id = context_client.SetSlice(sub_slice)
 
-                if USE_DLT:
+                if dlt_connector_client is not None:
                     topology_id = TopologyId(**json_topology_id(domain_uuid))
                     dlt_record_sender.add_slice(topology_id, sub_slice)
                 else:
@@ -137,8 +146,9 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
             LOGGER.info('[loop] adding sub-slice')
             reply.slice_subslice_ids.add().CopyFrom(sub_slice_id)   # pylint: disable=no-member
 
-        LOGGER.info('Recording Remote Slice requests to DLT')
-        dlt_record_sender.commit()
+        if dlt_connector_client is not None:
+            LOGGER.info('Recording Remote Slice requests to DLT')
+            dlt_record_sender.commit()
 
         LOGGER.info('Activating interdomain slice')
         reply.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
diff --git a/src/interdomain/service/RemoteDomainClients.py b/src/interdomain/service/RemoteDomainClients.py
index 297c9a60d0cc4c67fda7f746e5fead1837b9d4ee..e28176ef4fad2d3f2e2c6b1d7f0eb8d24116308a 100644
--- a/src/interdomain/service/RemoteDomainClients.py
+++ b/src/interdomain/service/RemoteDomainClients.py
@@ -12,44 +12,101 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, socket
+import logging, threading
+from typing import Optional, Tuple
 from common.Constants import DEFAULT_CONTEXT_NAME, ServiceNameEnum
+from common.DeviceTypes import DeviceTypeEnum
 from common.Settings import get_service_host, get_service_port_grpc
+from common.proto.context_pb2 import ConfigActionEnum, DeviceEvent
 from common.proto.context_pb2 import TeraFlowController
+from common.tools.context_queries.Device import get_device
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
 from interdomain.client.InterdomainClient import InterdomainClient
 
 LOGGER = logging.getLogger(__name__)
 
-class RemoteDomainClients:
+def get_domain_data(context_client : ContextClient, event : DeviceEvent) -> Optional[Tuple[str, str, int]]:
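+    # Extract the remote domain name, address and port from a NETWORK device's _connect/* config rules;
+    # return None if the device is not a NETWORK device or the connection data is incomplete.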
+    device_uuid = event.device_id.device_uuid.uuid
+    device = get_device(
+        context_client, device_uuid, include_endpoints=False,
+        include_components=False, include_config_rules=True)
+    if device.device_type != DeviceTypeEnum.NETWORK.value: return None
+    idc_domain_name = device.name
+    idc_domain_address = None
+    idc_domain_port = None
+    for config_rule in device.device_config.config_rules:
+        if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue
+        if config_rule.WhichOneof('config_rule') != 'custom': continue
+        if config_rule.custom.resource_key == '_connect/address':
+            idc_domain_address = config_rule.custom.resource_value
+        if config_rule.custom.resource_key == '_connect/port':
+            idc_domain_port = int(config_rule.custom.resource_value)
+    if idc_domain_address is None: return None
+    if idc_domain_port is None: return None
+    return idc_domain_name, idc_domain_address, idc_domain_port
+
+class RemoteDomainClients(threading.Thread):
     def __init__(self) -> None:
-        self.peer_domain = {}
+        super().__init__(daemon=True)
+        self.terminate = threading.Event()
+        self.lock = threading.Lock()
+        self.peer_domains = {}
+        self.context_client = ContextClient()
+        self.context_event_collector = EventsCollector(self.context_client)
 
-    def add_peer(
-            self, domain_name : str, host : str, port : int, context_uuid : str = DEFAULT_CONTEXT_NAME
-        ) -> None:
-        while True:
+    def stop(self):
+        self.terminate.set()
+
+    def run(self) -> None:
+        self.context_client.connect()
+        self.context_event_collector.start()
+
+        while not self.terminate.is_set():
+            event = self.context_event_collector.get_event(timeout=0.1)
+            if event is None: continue
+            if not isinstance(event, DeviceEvent): continue
+            LOGGER.info('Processing Event({:s})...'.format(grpc_message_to_json_string(event)))
+            domain_data = get_domain_data(self.context_client, event)
+            if domain_data is None: continue # not a remote-domain device or connection data incomplete
+            domain_name, domain_address, domain_port = domain_data
             try:
-                remote_teraflow_ip = socket.gethostbyname(host)
-                if len(remote_teraflow_ip) > 0: break
-            except socket.gaierror as e:
-                if str(e) == '[Errno -2] Name or service not known': continue
+                self.add_peer(domain_name, domain_address, domain_port)
+            except: # pylint: disable=bare-except
+                MSG = 'Unable to connect to remote domain {:s} ({:s}:{:d})'
+                LOGGER.exception(MSG.format(domain_name, domain_address, domain_port))
 
-        interdomain_client = InterdomainClient(host=host, port=port)
+        self.context_event_collector.stop()
+        self.context_client.close()
+
+    def add_peer(
+        self, domain_name : str, domain_address : str, domain_port : int, context_uuid : str = DEFAULT_CONTEXT_NAME
+    ) -> None:
         request = TeraFlowController()
-        request.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME # pylint: disable=no-member
+        request.context_id.context_uuid.uuid = context_uuid # pylint: disable=no-member
         request.ip_address = get_service_host(ServiceNameEnum.INTERDOMAIN)
         request.port = int(get_service_port_grpc(ServiceNameEnum.INTERDOMAIN))
 
+        interdomain_client = InterdomainClient(host=domain_address, port=domain_port)
+        interdomain_client.connect()
+
         reply = interdomain_client.Authenticate(request)
+
         if not reply.authenticated:
-            msg = 'Authentication against {:s}:{:d} rejected'
-            raise Exception(msg.format(str(remote_teraflow_ip), port))
+            MSG = 'Authentication against {:s}:{:d} for Domain({:s}) rejected'
+            # pylint: disable=broad-exception-raised
+            raise Exception(MSG.format(domain_address, domain_port, domain_name))
 
-        self.peer_domain[domain_name] = interdomain_client
+        with self.lock:
+            self.peer_domains[domain_name] = interdomain_client
+            LOGGER.info('Added peer domain {:s} ({:s}:{:d})'.format(domain_name, domain_address, domain_port))
 
     def get_peer(self, domain_name : str) -> InterdomainClient:
-        LOGGER.warning('peers: {:s}'.format(str(self.peer_domain)))
-        return self.peer_domain.get(domain_name)
+        with self.lock:
+            LOGGER.warning('peers: {:s}'.format(str(self.peer_domains)))
+            return self.peer_domains.get(domain_name)
 
     def remove_peer(self, domain_name : str) -> None:
-        return self.peer_domain.pop(domain_name, None)
+        with self.lock:
+            self.peer_domains.pop(domain_name, None)
+            LOGGER.info('Removed peer domain {:s}'.format(domain_name))
diff --git a/src/interdomain/service/__main__.py b/src/interdomain/service/__main__.py
index 73fa935399e7161aaf2ade06d51371c879607c3b..f867dc378020f3ef2ca8fb43b3beed538a1ebb9c 100644
--- a/src/interdomain/service/__main__.py
+++ b/src/interdomain/service/__main__.py
@@ -43,8 +43,6 @@ def main():
         get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
         get_env_var_name(ServiceNameEnum.SLICE,    ENVVAR_SUFIX_SERVICE_HOST     ),
         get_env_var_name(ServiceNameEnum.SLICE,    ENVVAR_SUFIX_SERVICE_PORT_GRPC),
-        get_env_var_name(ServiceNameEnum.DLT,      ENVVAR_SUFIX_SERVICE_HOST     ),
-        get_env_var_name(ServiceNameEnum.DLT,      ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
     signal.signal(signal.SIGINT,  signal_handler)
@@ -58,6 +56,7 @@ def main():
 
     # Define remote domain clients
     remote_domain_clients = RemoteDomainClients()
+    remote_domain_clients.start()
 
     # Starting Interdomain service
     grpc_service = InterdomainService(remote_domain_clients)
@@ -67,16 +66,13 @@ def main():
     topology_abstractor = TopologyAbstractor()
     topology_abstractor.start()
 
-    # TODO: improve with configuration the definition of the remote peers
-    #interdomain_service_port_grpc = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN)
-    #remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', interdomain_service_port_grpc)
-
     # Wait for Ctrl+C or termination signal
     while not terminate.wait(timeout=1.0): pass
 
     LOGGER.info('Terminating...')
     topology_abstractor.stop()
     grpc_service.stop()
+    remote_domain_clients.stop()
 
     LOGGER.info('Bye')
     return 0
diff --git a/src/interdomain/service/_old_code/add_peer_manually.txt b/src/interdomain/service/_old_code/add_peer_manually.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6582044b3dcef3ab81cc7b85655dce85c505aacd
--- /dev/null
+++ b/src/interdomain/service/_old_code/add_peer_manually.txt
@@ -0,0 +1,3 @@
+# TODO: improve with configuration the definition of the remote peers
+#interdomain_service_port_grpc = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN)
+#remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', interdomain_service_port_grpc)
diff --git a/src/interdomain/service/topology_abstractor/DltRecordSender.py b/src/interdomain/service/topology_abstractor/DltRecordSender.py
index d6efbc809ef848067d7ee496dca300ab8a04f406..c9a61ef6956acf461124a50e146a2d104ff2e131 100644
--- a/src/interdomain/service/topology_abstractor/DltRecordSender.py
+++ b/src/interdomain/service/topology_abstractor/DltRecordSender.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging
-from typing import Dict, List, Tuple
+from typing import Dict, List, Optional, Tuple
 from common.proto.context_pb2 import Device, Link, Service, Slice, TopologyId
 from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId
 from context.client.ContextClient import ContextClient
@@ -23,7 +23,7 @@ from .Types import DltRecordTypes
 LOGGER = logging.getLogger(__name__)
 
 class DltRecordSender:
-    def __init__(self, context_client : ContextClient, dlt_connector_client : DltConnectorClient) -> None:
+    def __init__(self, context_client : ContextClient, dlt_connector_client : Optional[DltConnectorClient]) -> None:
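+        # dlt_connector_client may be None when the DLT component is not deployed; records are then stored in Context only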
         self.context_client = context_client
         self.dlt_connector_client = dlt_connector_client
         self.dlt_record_uuids : List[str] = list()
@@ -65,24 +65,28 @@ class DltRecordSender:
             topology_id,dlt_record = self.dlt_record_uuid_to_data[dlt_record_uuid]
             if isinstance(dlt_record, Device):
                 device_id = self.context_client.SetDevice(dlt_record)
+                if self.dlt_connector_client is None: continue
                 dlt_device_id = DltDeviceId()
                 dlt_device_id.topology_id.CopyFrom(topology_id)     # pylint: disable=no-member
                 dlt_device_id.device_id.CopyFrom(device_id)         # pylint: disable=no-member
                 self.dlt_connector_client.RecordDevice(dlt_device_id)
             elif isinstance(dlt_record, Link):
                 link_id = self.context_client.SetLink(dlt_record)
+                if self.dlt_connector_client is None: continue
                 dlt_link_id = DltLinkId()
                 dlt_link_id.topology_id.CopyFrom(topology_id)       # pylint: disable=no-member
                 dlt_link_id.link_id.CopyFrom(link_id)               # pylint: disable=no-member
                 self.dlt_connector_client.RecordLink(dlt_link_id)
             elif isinstance(dlt_record, Service):
                 service_id = self.context_client.SetService(dlt_record)
+                if self.dlt_connector_client is None: continue
                 dlt_service_id = DltServiceId()
                 dlt_service_id.topology_id.CopyFrom(topology_id)    # pylint: disable=no-member
                 dlt_service_id.service_id.CopyFrom(service_id)      # pylint: disable=no-member
                 self.dlt_connector_client.RecordService(dlt_service_id)
             elif isinstance(dlt_record, Slice):
                 slice_id = self.context_client.SetSlice(dlt_record)
+                if self.dlt_connector_client is None: continue
                 dlt_slice_id = DltSliceId()
                 dlt_slice_id.topology_id.CopyFrom(topology_id)      # pylint: disable=no-member
                 dlt_slice_id.slice_id.CopyFrom(slice_id)            # pylint: disable=no-member
diff --git a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py
index bdbf016f85a0c58969b7a58677f9695e2635c8c0..20b186f307fb583734f8d0e96cea2a26e24e5590 100644
--- a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py
+++ b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py
@@ -14,8 +14,9 @@
 
 import logging, threading
 from typing import Dict, Optional, Tuple
-from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME, ServiceNameEnum
 from common.DeviceTypes import DeviceTypeEnum
+from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, find_missing_environment_variables, get_env_var_name
 from common.proto.context_pb2 import (
     ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EndPoint, EndPointId, Link, LinkEvent, TopologyId,
     TopologyEvent)
@@ -48,7 +49,6 @@ class TopologyAbstractor(threading.Thread):
         self.terminate = threading.Event()
 
         self.context_client = ContextClient()
-        self.dlt_connector_client = DltConnectorClient()
         self.context_event_collector = EventsCollector(self.context_client)
 
         self.real_to_abstract_device_uuid : Dict[str, str] = dict()
@@ -69,7 +69,6 @@ class TopologyAbstractor(threading.Thread):
         topology_uuids = [DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME]
         create_missing_topologies(self.context_client, ADMIN_CONTEXT_ID, topology_uuids)
 
-        self.dlt_connector_client.connect()
         self.context_event_collector.start()
 
         while not self.terminate.is_set():
@@ -81,7 +80,6 @@ class TopologyAbstractor(threading.Thread):
 
         self.context_event_collector.stop()
         self.context_client.close()
-        self.dlt_connector_client.close()
 
     #def ignore_event(self, event : EventTypes) -> List[DltRecordIdTypes]:
     #    # TODO: filter events resulting from abstraction computation
@@ -226,7 +224,18 @@ class TopologyAbstractor(threading.Thread):
             if changed: dlt_record_sender.add_link(INTERDOMAIN_TOPOLOGY_ID, abstract_link.link)
 
     def update_abstraction(self, event : EventTypes) -> None:
-        dlt_record_sender = DltRecordSender(self.context_client, self.dlt_connector_client)
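+        # Only record to the DLT when the DLT connector component is deployed (detected via its env vars)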
+        missing_env_vars = find_missing_environment_variables([
+            get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_HOST     ),
+            get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        ])
+        if len(missing_env_vars) == 0:
+            # DLT available
+            dlt_connector_client = DltConnectorClient()
+            dlt_connector_client.connect()
+        else:
+            dlt_connector_client = None
+
+        dlt_record_sender = DltRecordSender(self.context_client, dlt_connector_client)
 
         if isinstance(event, ContextEvent):
             LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event)))
@@ -286,3 +295,4 @@ class TopologyAbstractor(threading.Thread):
             LOGGER.warning('Unsupported Event({:s})'.format(grpc_message_to_json_string(event)))
 
         dlt_record_sender.commit()
+        if dlt_connector_client is not None: dlt_connector_client.close()
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
index fdd400a2110fd4a75d6f9e8cc4820bc943eef423..149c2568bf2cc08b351d9af31ecbe512faf3e796 100644
--- a/src/load_generator/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -256,8 +256,8 @@ class RequestGenerator:
                 json_constraint_sla_latency(e2e_latency_ms),
             ]
 
-            vlan_id = num_request % 1000
-            circuit_id = '{:03d}'.format(vlan_id + 100)
+            vlan_id = 300 + num_request % 1000
+            circuit_id = '{:03d}'.format(vlan_id)
 
             src_device_name = self._device_data[src_device_uuid]['name']
             src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
@@ -307,7 +307,7 @@ class RequestGenerator:
 
             bgp_as = 65000 + (num_request % 10000)
 
-            vlan_id = num_request % 100 +100
+            vlan_id = 300 + num_request % 1000
             x = num_request % 255
             y = num_request % 25 * num_request % 10
             route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id)
@@ -427,7 +427,7 @@ class RequestGenerator:
         ]
 
         if request_type == RequestType.SLICE_L2NM:
-            vlan_id = num_request % 1000
+            vlan_id = 300 + num_request % 1000
             circuit_id = '{:03d}'.format(vlan_id)
 
             src_device_name = self._device_data[src_device_uuid]['name']
@@ -459,7 +459,8 @@ class RequestGenerator:
             ]
 
         elif request_type == RequestType.SLICE_L3NM:
-            vlan_id = num_request % 1000
+            vlan_id = 300 + num_request % 1000
+            circuit_id = '{:03d}'.format(vlan_id)
             bgp_as = 60000 + (num_request % 10000)
             bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333)
             route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id)
diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
index ac44574ad60242b0acf21ba824ea448d5ec30bf1..072696324342bc425329c134cf6c48704de313da 100644
--- a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
+++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
@@ -24,7 +24,7 @@ def setup_config_rules(
     if service_settings  is None: return []
     if endpoint_settings is None: return []
 
-    #json_settings          : Dict = service_settings.value
+    json_settings          : Dict = service_settings.value
     json_endpoint_settings : Dict = endpoint_settings.value
 
     #mtu                 = json_settings.get('mtu',                 1450 )    # 1512
@@ -35,11 +35,14 @@ def setup_config_rules(
     #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
     #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
     sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
-    vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
+    vlan_id             = json_endpoint_settings.get('vlan_id',             None     )  # 400
     #address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
     #address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30
     remote_router       = json_endpoint_settings.get('remote_router',       '0.0.0.0')  # '5.5.5.5'
-    circuit_id          = json_endpoint_settings.get('circuit_id',          '000'    )  # '111'
+    circuit_id          = json_endpoint_settings.get('circuit_id',          None     )  # '111'
+
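+    # Fall back to service-level settings when the endpoint does not define vlan_id / circuit_id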
+    if vlan_id is None: vlan_id = json_settings.get('vlan_id', 1)
+    if circuit_id is None: circuit_id = json_settings.get('circuit_id', '000')
 
     if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
     network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
diff --git a/src/tests/oeccpsc22/.gitignore b/src/tests/oeccpsc22/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..6b97d6fe3ad32f39097745229ab7f547f26ecb12
--- /dev/null
+++ b/src/tests/oeccpsc22/.gitignore
@@ -0,0 +1 @@
+# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc.
diff --git a/src/tests/oeccpsc22/delete_all.sh b/src/tests/oeccpsc22/delete_all.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c0d39fa8bd690ebfe13bea321f94f769d5817079
--- /dev/null
+++ b/src/tests/oeccpsc22/delete_all.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Delete old namespaces
+kubectl delete namespace tfs-dom1 tfs-dom2
+
+# Delete secondary ingress controllers
+kubectl delete -f oeccpsc22/nginx-ingress-controller-dom1.yaml
+kubectl delete -f oeccpsc22/nginx-ingress-controller-dom2.yaml
diff --git a/src/tests/oeccpsc22/deploy_all.sh b/src/tests/oeccpsc22/deploy_all.sh
new file mode 100755
index 0000000000000000000000000000000000000000..737230346593bff3840cf7097fcf726242f84ce4
--- /dev/null
+++ b/src/tests/oeccpsc22/deploy_all.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Delete old namespaces
+kubectl delete namespace tfs-dom1 tfs-dom2
+
+# Delete secondary ingress controllers
+kubectl delete -f oeccpsc22/nginx-ingress-controller-dom1.yaml
+kubectl delete -f oeccpsc22/nginx-ingress-controller-dom2.yaml
+
+# Create secondary ingress controllers
+kubectl apply -f oeccpsc22/nginx-ingress-controller-dom1.yaml
+kubectl apply -f oeccpsc22/nginx-ingress-controller-dom2.yaml
+
+# Deploy TFS for Domain 1
+source oeccpsc22/deploy_specs_dom1.sh
+./deploy/all.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom1.sh
+
+# Deploy TFS for Domain 2
+source oeccpsc22/deploy_specs_dom2.sh
+./deploy/all.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom2.sh
diff --git a/src/tests/oeccpsc22/deploy_dom1.sh b/src/tests/oeccpsc22/deploy_dom1.sh
new file mode 100755
index 0000000000000000000000000000000000000000..81fb9984450461f46e9011c7ef49dfe3070fc371
--- /dev/null
+++ b/src/tests/oeccpsc22/deploy_dom1.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Delete old namespaces
+kubectl delete namespace tfs-dom1
+
+# Delete secondary ingress controllers
+kubectl delete -f oeccpsc22/nginx-ingress-controller-dom1.yaml
+
+# Create secondary ingress controllers
+kubectl apply -f oeccpsc22/nginx-ingress-controller-dom1.yaml
+
+# Deploy TFS for Domain 1
+source oeccpsc22/deploy_specs_dom1.sh
+./deploy/all.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom1.sh
diff --git a/src/tests/oeccpsc22/deploy_dom2.sh b/src/tests/oeccpsc22/deploy_dom2.sh
new file mode 100755
index 0000000000000000000000000000000000000000..93fff1d15a56ceb20a6d625643453244eb71124e
--- /dev/null
+++ b/src/tests/oeccpsc22/deploy_dom2.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Delete old namespaces
+kubectl delete namespace tfs-dom2
+
+# Delete secondary ingress controllers
+kubectl delete -f oeccpsc22/nginx-ingress-controller-dom2.yaml
+
+# Create secondary ingress controllers
+kubectl apply -f oeccpsc22/nginx-ingress-controller-dom2.yaml
+
+# Deploy TFS for Domain 2
+source oeccpsc22/deploy_specs_dom2.sh
+./deploy/all.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom2.sh
diff --git a/src/tests/oeccpsc22/deploy_specs_dom1.sh b/src/tests/oeccpsc22/deploy_specs_dom1.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b269236b0bd7ac6ac21a743205157fd9aec42c37
--- /dev/null
+++ b/src/tests/oeccpsc22/deploy_specs_dom1.sh
@@ -0,0 +1,127 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+#export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui interdomain load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice interdomain webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE="tfs-dom1"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="oeccpsc22/nginx-ingress-http-dom1.yaml oeccpsc22/expose-services-dom1.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_dom1"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS=""
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-dom1"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4223"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8223"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-dom1"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8813"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9011"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9001"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST=""
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/tests/oeccpsc22/deploy_specs_dom2.sh b/src/tests/oeccpsc22/deploy_specs_dom2.sh
new file mode 100755
index 0000000000000000000000000000000000000000..112142437ed172f3b773ae148b57b5e0732676fb
--- /dev/null
+++ b/src/tests/oeccpsc22/deploy_specs_dom2.sh
@@ -0,0 +1,127 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+#export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui interdomain load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice interdomain webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE="tfs-dom2"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="oeccpsc22/nginx-ingress-http-dom2.yaml oeccpsc22/expose-services-dom2.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Enable the skip-build flag to reuse the Docker images already built for Domain 1.
+export TFS_SKIP_BUILD="YES"
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_dom2"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS=""
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-dom2"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4224"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8224"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-dom2"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8814"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9012"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9002"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST=""
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
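As a usage sketch only (the entry point ./deploy/all.sh is the one referenced in the comments above; the exact working directory is an assumption), each domain is brought up by sourcing its spec file and then running the deploy scripts; Domain 1 is deployed the same way with its own spec file:

    source src/tests/oeccpsc22/deploy_specs_dom2.sh   # export the Domain 2 settings
    ./deploy/all.sh                                   # deploy TFS into the tfs-dom2 namespace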
diff --git a/src/tests/oeccpsc22/descriptors/domain1.json b/src/tests/oeccpsc22/descriptors/domain1.json
new file mode 100644
index 0000000000000000000000000000000000000000..2db10b4d1f2e7a702e6da99d1da28fc2fc21c288
--- /dev/null
+++ b/src/tests/oeccpsc22/descriptors/domain1.json
@@ -0,0 +1,178 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}},
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "int", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "D1",  "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border"  }
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "int", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "D2",  "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border"  }
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "D2"}}, "device_type": "network", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "10010"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "D1",  "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border"  },
+                    {"uuid": "DC2", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border"  }
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R1@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "2",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "5",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "DC1", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border"  }
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R2@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "1",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "3",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "5",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R3@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "2",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "4",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R4@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "3",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "5",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "D2",  "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border"  }
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R5@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "1",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "2",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "4",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {"link_id": {"link_uuid": {"uuid": "DC1/D1==R1@D1/DC1"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "D1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}},
+            {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "DC1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R1@D1/DC1==DC1/D1"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "DC1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}},
+            {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "D1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "DC2/D2==D2/DC2"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "D2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}},
+            {"device_id": {"device_uuid": {"uuid": "D2"}}, "endpoint_uuid": {"uuid": "DC2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "D2/DC2==DC2/D2"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "D2"}}, "endpoint_uuid": {"uuid": "DC2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}},
+            {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "D2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R4@D1/D2==D2/D1"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "D2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}},
+            {"device_id": {"device_uuid": {"uuid": "D2"}}, "endpoint_uuid": {"uuid": "D1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "D2/D1==R4@D1/D2"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "D2"}}, "endpoint_uuid": {"uuid": "D1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}},
+            {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "D2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}
+        ]},
+
+        {"link_id": {"link_uuid": {"uuid": "R1@D1/2==R2@D1/1"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "2"}},
+            {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "1"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R1@D1/5==R5@D1/1"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "5"}},
+            {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "1"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R2@D1/1==R1@D1/2"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "1"}},
+            {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "2"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R2@D1/3==R3@D1/2"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "3"}},
+            {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "2"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R2@D1/5==R5@D1/2"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "5"}},
+            {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "2"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R3@D1/2==R2@D1/3"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "2"}},
+            {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "3"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R3@D1/4==R4@D1/3"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "4"}},
+            {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "3"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R4@D1/3==R3@D1/4"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "3"}},
+            {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "4"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R4@D1/5==R5@D1/4"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "5"}},
+            {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "4"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R5@D1/1==R1@D1/5"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "1"}},
+            {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "5"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R5@D1/2==R2@D1/5"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "2"}},
+            {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "5"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R5@D1/4==R4@D1/5"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "4"}},
+            {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "5"}}
+        ]}
+    ]
+}
diff --git a/src/tests/oeccpsc22/descriptors/domain2.json b/src/tests/oeccpsc22/descriptors/domain2.json
new file mode 100644
index 0000000000000000000000000000000000000000..e7a00f74e7cc14c9db6d7be6fa89ef538f9cfa34
--- /dev/null
+++ b/src/tests/oeccpsc22/descriptors/domain2.json
@@ -0,0 +1,72 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}},
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "R1@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "2", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "3", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R2@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "1",  "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "3",  "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "D1", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border"  }
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R3@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "1",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "2",   "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"},
+                    {"uuid": "DC2", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border"  }
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {"link_id": {"link_uuid": {"uuid": "R1@D2/2==R2@D2/1"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "2"}},
+            {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "1"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R1@D2/3==R3@D2/1"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "3"}},
+            {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "1"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R2@D2/1==R1@D2/2"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "1"}},
+            {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "2"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R2@D2/3==R3@D2/2"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "3"}},
+            {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "2"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R3@D2/1==R1@D2/3"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "1"}},
+            {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "3"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "R3@D2/2==R2@D2/3"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "2"}},
+            {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "3"}}
+        ]}
+    ]
+}
diff --git a/src/tests/oeccpsc22/descriptors/inter-domain-service.json b/src/tests/oeccpsc22/descriptors/inter-domain-service.json
new file mode 100644
index 0000000000000000000000000000000000000000..4b53c433aba16845b703b72a3fe4ef1f9e54c0f2
--- /dev/null
+++ b/src/tests/oeccpsc22/descriptors/inter-domain-service.json
@@ -0,0 +1,32 @@
+{
+    "services": [
+        {
+            "service_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "idc-l2-svc"}},
+            "service_type": 2,
+            "service_status": {"service_status": 1},
+            "service_endpoint_ids": [
+                {"device_id":{"device_uuid":{"uuid":"DC1"}},"endpoint_uuid":{"uuid":"int"}},
+                {"device_id":{"device_uuid":{"uuid":"DC2"}},"endpoint_uuid":{"uuid":"int"}}
+            ],
+            "service_constraints": [
+                {"sla_capacity": {"capacity_gbps": 10.0}},
+                {"sla_latency": {"e2e_latency_ms": 15.2}}
+            ],
+            "service_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"mtu": 1512, "vlan_id": 300}}},
+                {"action": 1, "custom": {"resource_key": "/device[R1@D1]/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.0.2"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R1@D1]/endpoint[5]/settings", "resource_value": {"remote_router": "10.0.0.5"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R2@D1]/endpoint[1]/settings", "resource_value": {"remote_router": "10.0.0.1"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R2@D1]/endpoint[3]/settings", "resource_value": {"remote_router": "10.0.0.3"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R2@D1]/endpoint[5]/settings", "resource_value": {"remote_router": "10.0.0.5"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R3@D1]/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.0.2"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R3@D1]/endpoint[4]/settings", "resource_value": {"remote_router": "10.0.0.4"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R4@D1]/endpoint[3]/settings", "resource_value": {"remote_router": "10.0.0.3"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R4@D1]/endpoint[5]/settings", "resource_value": {"remote_router": "10.0.0.5"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R5@D1]/endpoint[1]/settings", "resource_value": {"remote_router": "10.0.0.1"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R5@D1]/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.0.2"}}},
+                {"action": 1, "custom": {"resource_key": "/device[R5@D1]/endpoint[4]/settings", "resource_value": {"remote_router": "10.0.0.4"}}}
+            ]}
+        }
+    ]
+}
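For a quick, purely illustrative sanity check of the new descriptor files, jq (assumed to be available) can confirm they parse and contain the expected objects:

    jq '.devices | length' src/tests/oeccpsc22/descriptors/domain1.json   # 8 devices in Domain 1
    jq '.services[0].service_constraints' src/tests/oeccpsc22/descriptors/inter-domain-service.json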
diff --git a/src/tests/oeccpsc22/dump_logs.sh b/src/tests/oeccpsc22/dump_logs.sh
index a2180f6dff3f35cc7d0e9e2011179a6d8b933ea1..3a2e51a56b9346a254995d935fd45d8b5e7a1396 100755
--- a/src/tests/oeccpsc22/dump_logs.sh
+++ b/src/tests/oeccpsc22/dump_logs.sh
@@ -5,7 +5,7 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#      http://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,20 +13,33 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-mkdir -p tmp/exec_logs/
 
-kubectl --namespace oeccpsc22-1 logs deployment/computeservice -c server > tmp/exec_logs/d1_compute.log
-kubectl --namespace oeccpsc22-1 logs deployment/contextservice -c server > tmp/exec_logs/d1_context.log
-kubectl --namespace oeccpsc22-1 logs deployment/deviceservice -c server > tmp/exec_logs/d1_device.log
-kubectl --namespace oeccpsc22-1 logs deployment/interdomainservice -c server > tmp/exec_logs/d1_interdomain.log
-kubectl --namespace oeccpsc22-1 logs deployment/monitoringservice -c server > tmp/exec_logs/d1_monitoring.log
-kubectl --namespace oeccpsc22-1 logs deployment/serviceservice -c server > tmp/exec_logs/d1_service.log
-kubectl --namespace oeccpsc22-1 logs deployment/sliceservice -c server > tmp/exec_logs/d1_slice.log
+rm -rf tmp/exec
 
-kubectl --namespace oeccpsc22-2 logs deployment/computeservice -c server > tmp/exec_logs/d2_compute.log
-kubectl --namespace oeccpsc22-2 logs deployment/contextservice -c server > tmp/exec_logs/d2_context.log
-kubectl --namespace oeccpsc22-2 logs deployment/deviceservice -c server > tmp/exec_logs/d2_device.log
-kubectl --namespace oeccpsc22-2 logs deployment/interdomainservice -c server > tmp/exec_logs/d2_interdomain.log
-kubectl --namespace oeccpsc22-2 logs deployment/monitoringservice -c server > tmp/exec_logs/d2_monitoring.log
-kubectl --namespace oeccpsc22-2 logs deployment/serviceservice -c server > tmp/exec_logs/d2_service.log
-kubectl --namespace oeccpsc22-2 logs deployment/sliceservice -c server > tmp/exec_logs/d2_slice.log
+echo "Collecting logs for Domain 1..."
+mkdir -p tmp/exec/dom1
+kubectl --namespace tfs-dom1 logs deployments/contextservice server > tmp/exec/dom1/context.log
+kubectl --namespace tfs-dom1 logs deployments/deviceservice server > tmp/exec/dom1/device.log
+kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/exec/dom1/service.log
+kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/exec/dom1/pathcomp-frontend.log
+kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/exec/dom1/pathcomp-backend.log
+kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/exec/dom1/slice.log
+kubectl --namespace tfs-dom1 logs deployment/computeservice server > tmp/exec/dom1/compute.log
+kubectl --namespace tfs-dom1 logs deployment/interdomainservice server > tmp/exec/dom1/interdomain.log
+kubectl --namespace tfs-dom1 logs deployment/monitoringservice server > tmp/exec/dom1/monitoring.log
+printf "\n"
+
+echo "Collecting logs for Domain 2..."
+mkdir -p tmp/exec/dom2
+kubectl --namespace tfs-dom2 logs deployments/contextservice server > tmp/exec/dom2/context.log
+kubectl --namespace tfs-dom2 logs deployments/deviceservice server > tmp/exec/dom2/device.log
+kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/exec/dom2/service.log
+kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/exec/dom2/pathcomp-frontend.log
+kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/exec/dom2/pathcomp-backend.log
+kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/exec/dom2/slice.log
+kubectl --namespace tfs-dom2 logs deployment/computeservice server > tmp/exec/dom2/compute.log
+kubectl --namespace tfs-dom2 logs deployment/interdomainservice server > tmp/exec/dom2/interdomain.log
+kubectl --namespace tfs-dom2 logs deployment/monitoringservice server > tmp/exec/dom2/monitoring.log
+printf "\n"
+
+echo "Done!"
diff --git a/src/tests/oeccpsc22/expose-services-dom1.yaml b/src/tests/oeccpsc22/expose-services-dom1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ebfb38fc4781240887fa52163224607da661b952
--- /dev/null
+++ b/src/tests/oeccpsc22/expose-services-dom1.yaml
@@ -0,0 +1,106 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: remote-teraflow
+spec:
+  type: ExternalName
+  externalName: interdomainservice.tfs-dom2.svc.cluster.local
+  ports:
+  - name: grpc
+    protocol: TCP
+    port: 10010
+#---
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: contextservice-public
+#  labels:
+#    app: contextservice
+#spec:
+#  type: NodePort
+#  selector:
+#    app: contextservice
+#  ports:
+#  - name: grpc
+#    protocol: TCP
+#    port: 1010
+#    targetPort: 1010
+#    nodePort: 30111
+#  - name: rest
+#    protocol: TCP
+#    port: 8080
+#    targetPort: 8080
+#    nodePort: 30001
+#  - name: redis
+#    protocol: TCP
+#    port: 6379
+#    targetPort: 6379
+#    nodePort: 30631
+#---
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: deviceservice-public
+#  labels:
+#    app: deviceservice
+#spec:
+#  type: NodePort
+#  selector:
+#    app: deviceservice
+#  ports:
+#  - name: grpc
+#    protocol: TCP
+#    port: 2020
+#    targetPort: 2020
+#    nodePort: 30221
+#---
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: computeservice-public
+#spec:
+#  type: NodePort
+#  selector:
+#    app: computeservice
+#  ports:
+#  - name: http
+#    protocol: TCP
+#    port: 8080
+#    targetPort: 8080
+#    nodePort: 30881
+#---
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: webuiservice-public
+#  labels:
+#    app: webuiservice
+#spec:
+#  type: NodePort
+#  selector:
+#    app: webuiservice
+#  ports:
+#  - name: http
+#    protocol: TCP
+#    port: 8004
+#    targetPort: 8004
+#    nodePort: 30801
+#  - name: grafana
+#    protocol: TCP
+#    port: 3000
+#    targetPort: 3000
+#    nodePort: 30301
diff --git a/src/tests/oeccpsc22/expose-services-dom2.yaml b/src/tests/oeccpsc22/expose-services-dom2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cf04f3f5eb4b029cc4cb03e41df9c8ced23d92f8
--- /dev/null
+++ b/src/tests/oeccpsc22/expose-services-dom2.yaml
@@ -0,0 +1,106 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: remote-teraflow
+spec:
+  type: ExternalName
+  externalName: interdomainservice.tfs-dom1.svc.cluster.local
+  ports:
+  - name: grpc
+    protocol: TCP
+    port: 10010
+#---
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: contextservice-public
+#  labels:
+#    app: contextservice
+#spec:
+#  type: NodePort
+#  selector:
+#    app: contextservice
+#  ports:
+#  - name: grpc
+#    protocol: TCP
+#    port: 1010
+#    targetPort: 1010
+#    nodePort: 30112
+#  - name: rest
+#    protocol: TCP
+#    port: 8080
+#    targetPort: 8080
+#    nodePort: 30002
+#  - name: redis
+#    protocol: TCP
+#    port: 6379
+#    targetPort: 6379
+#    nodePort: 30632
+#---
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: deviceservice-public
+#  labels:
+#    app: deviceservice
+#spec:
+#  type: NodePort
+#  selector:
+#    app: deviceservice
+#  ports:
+#  - name: grpc
+#    protocol: TCP
+#    port: 2020
+#    targetPort: 2020
+#    nodePort: 30222
+#---
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: computeservice-public
+#spec:
+#  type: NodePort
+#  selector:
+#    app: computeservice
+#  ports:
+#  - name: http
+#    protocol: TCP
+#    port: 8080
+#    targetPort: 8080
+#    nodePort: 30882
+#---
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: webuiservice-public
+#  labels:
+#    app: webuiservice
+#spec:
+#  type: NodePort
+#  selector:
+#    app: webuiservice
+#  ports:
+#  - name: http
+#    protocol: TCP
+#    port: 8004
+#    targetPort: 8004
+#    nodePort: 30802
+#  - name: grafana
+#    protocol: TCP
+#    port: 3000
+#    targetPort: 3000
+#    nodePort: 30302
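To check the inter-domain wiring after deployment, the ExternalName services can be inspected in each namespace (a hedged check; it assumes the dom1 manifest is applied into tfs-dom1 the same way the dom2 manifest is listed in TFS_EXTRA_MANIFESTS above):

    kubectl --namespace tfs-dom1 get service remote-teraflow -o wide
    kubectl --namespace tfs-dom2 get service remote-teraflow -o wide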
diff --git a/src/tests/oeccpsc22/expose_services_teraflow_1.yaml b/src/tests/oeccpsc22/expose_services_teraflow_1.yaml
deleted file mode 100644
index d956db1a781147467efd1c4724e0734b579d8f5d..0000000000000000000000000000000000000000
--- a/src/tests/oeccpsc22/expose_services_teraflow_1.yaml
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: remote-teraflow
-spec:
-  type: ExternalName
-  externalName: interdomainservice.oeccpsc22-2.svc.cluster.local
-  ports:
-  - name: grpc
-    protocol: TCP
-    port: 10010
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: contextservice-public
-  labels:
-    app: contextservice
-spec:
-  type: NodePort
-  selector:
-    app: contextservice
-  ports:
-  - name: grpc
-    protocol: TCP
-    port: 1010
-    targetPort: 1010
-    nodePort: 30111
-  - name: rest
-    protocol: TCP
-    port: 8080
-    targetPort: 8080
-    nodePort: 30001
-  - name: redis
-    protocol: TCP
-    port: 6379
-    targetPort: 6379
-    nodePort: 30631
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: deviceservice-public
-  labels:
-    app: deviceservice
-spec:
-  type: NodePort
-  selector:
-    app: deviceservice
-  ports:
-  - name: grpc
-    protocol: TCP
-    port: 2020
-    targetPort: 2020
-    nodePort: 30221
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: computeservice-public
-spec:
-  type: NodePort
-  selector:
-    app: computeservice
-  ports:
-  - name: http
-    protocol: TCP
-    port: 8080
-    targetPort: 8080
-    nodePort: 30881
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: webuiservice-public
-  labels:
-    app: webuiservice
-spec:
-  type: NodePort
-  selector:
-    app: webuiservice
-  ports:
-  - name: http
-    protocol: TCP
-    port: 8004
-    targetPort: 8004
-    nodePort: 30801
-  - name: grafana
-    protocol: TCP
-    port: 3000
-    targetPort: 3000
-    nodePort: 30301
diff --git a/src/tests/oeccpsc22/expose_services_teraflow_2.yaml b/src/tests/oeccpsc22/expose_services_teraflow_2.yaml
deleted file mode 100644
index d8acb96533f8886afc0fd5c802d3616277676e33..0000000000000000000000000000000000000000
--- a/src/tests/oeccpsc22/expose_services_teraflow_2.yaml
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: remote-teraflow
-spec:
-  type: ExternalName
-  externalName: interdomainservice.oeccpsc22-1.svc.cluster.local
-  ports:
-  - name: grpc
-    protocol: TCP
-    port: 10010
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: contextservice-public
-  labels:
-    app: contextservice
-spec:
-  type: NodePort
-  selector:
-    app: contextservice
-  ports:
-  - name: grpc
-    protocol: TCP
-    port: 1010
-    targetPort: 1010
-    nodePort: 30112
-  - name: rest
-    protocol: TCP
-    port: 8080
-    targetPort: 8080
-    nodePort: 30002
-  - name: redis
-    protocol: TCP
-    port: 6379
-    targetPort: 6379
-    nodePort: 30632
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: deviceservice-public
-  labels:
-    app: deviceservice
-spec:
-  type: NodePort
-  selector:
-    app: deviceservice
-  ports:
-  - name: grpc
-    protocol: TCP
-    port: 2020
-    targetPort: 2020
-    nodePort: 30222
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: computeservice-public
-spec:
-  type: NodePort
-  selector:
-    app: computeservice
-  ports:
-  - name: http
-    protocol: TCP
-    port: 8080
-    targetPort: 8080
-    nodePort: 30882
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: webuiservice-public
-  labels:
-    app: webuiservice
-spec:
-  type: NodePort
-  selector:
-    app: webuiservice
-  ports:
-  - name: http
-    protocol: TCP
-    port: 8004
-    targetPort: 8004
-    nodePort: 30802
-  - name: grafana
-    protocol: TCP
-    port: 3000
-    targetPort: 3000
-    nodePort: 30302
diff --git a/src/tests/oeccpsc22/fast_redeploy.sh b/src/tests/oeccpsc22/fast_redeploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f4e909eef106a4720d184415d1ec584adb98e1b0
--- /dev/null
+++ b/src/tests/oeccpsc22/fast_redeploy.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+kubectl delete namespace tfs-dom1 tfs-dom2
+
+echo "Deploying tfs-dom1 ..."
+kubectl delete -f oeccpsc22/nginx-ingress-controller-dom1.yaml              > ./tmp/logs/deploy-tfs-dom1.log
+kubectl create namespace tfs-dom1                                          >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl apply -f oeccpsc22/nginx-ingress-controller-dom1.yaml              >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/contextservice.yaml  >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/deviceservice.yaml   >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/pathcompservice.yaml >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/serviceservice.yaml  >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/sliceservice.yaml    >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/webuiservice.yaml    >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f oeccpsc22/tfs-ingress-dom1.yaml      >> ./tmp/logs/deploy-tfs-dom1.log
+printf "\n"
+
+echo "Deploying tfs-dom2 ..."
+kubectl delete -f oeccpsc22/nginx-ingress-controller-dom2.yaml              > ./tmp/logs/deploy-tfs-dom2.log
+kubectl create namespace tfs-dom2                                          >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl apply -f oeccpsc22/nginx-ingress-controller-dom2.yaml              >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/contextservice.yaml  >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/deviceservice.yaml   >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/pathcompservice.yaml >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/serviceservice.yaml  >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/sliceservice.yaml    >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/webuiservice.yaml    >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f oeccpsc22/tfs-ingress-dom2.yaml      >> ./tmp/logs/deploy-tfs-dom2.log
+printf "\n"
+
+echo "Waiting tfs-dom1 ..."
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/contextservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/deviceservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/pathcompservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/serviceservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/sliceservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/webuiservice
+printf "\n"
+
+echo "Waiting tfs-dom2 ..."
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/contextservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/deviceservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/pathcompservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/serviceservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/sliceservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/webuiservice
+printf "\n"
+
+echo "Done!"
diff --git a/src/tests/oeccpsc22/nginx-ingress-controller-dom1.yaml b/src/tests/oeccpsc22/nginx-ingress-controller-dom1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1815bfbaa481ef269c513f8b55d949127f10bc30
--- /dev/null
+++ b/src/tests/oeccpsc22/nginx-ingress-controller-dom1.yaml
@@ -0,0 +1,134 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-microk8s-conf-dom1
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-udp-microk8s-conf-dom1
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-tcp-microk8s-conf-dom1
+  namespace: ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: tfs-ingress-class-dom1
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "false"
+spec:
+  controller: tfs.etsi.org/controller-class-dom1
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nginx-ingress-microk8s-controller-dom1
+  namespace: ingress
+  labels:
+    microk8s-application: nginx-ingress-microk8s-dom1
+spec:
+  selector:
+    matchLabels:
+      name: nginx-ingress-microk8s-dom1
+  updateStrategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: nginx-ingress-microk8s-dom1
+    spec:
+      terminationGracePeriodSeconds: 60
+      restartPolicy: Always
+      serviceAccountName: nginx-ingress-microk8s-serviceaccount
+      containers:
+      - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0
+        imagePullPolicy: IfNotPresent
+        name: nginx-ingress-microk8s
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        lifecycle:
+          preStop:
+            exec:
+              command:
+                - /wait-shutdown
+        securityContext:
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - ALL
+          runAsUser: 101 # www-data
+        env:
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.namespace
+        ports:
+        - name: http
+          containerPort: 80
+          hostPort: 8001
+          protocol: TCP
+        - name: https
+          containerPort: 443
+          hostPort: 4431
+          protocol: TCP
+        - name: health
+          containerPort: 10254
+          hostPort: 12541
+          protocol: TCP
+        args:
+        - /nginx-ingress-controller
+        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom1
+        - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom1
+        - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom1
+        - --election-id=ingress-controller-leader-dom1
+        - --controller-class=tfs.etsi.org/controller-class-dom1
+        - --ingress-class=tfs-ingress-class-dom1
+        - ' '
+        - --publish-status-address=127.0.0.1
diff --git a/src/tests/oeccpsc22/nginx-ingress-controller-dom2.yaml b/src/tests/oeccpsc22/nginx-ingress-controller-dom2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dede032850092733aa9b3136f20ecf078b2ce05a
--- /dev/null
+++ b/src/tests/oeccpsc22/nginx-ingress-controller-dom2.yaml
@@ -0,0 +1,134 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-microk8s-conf-dom2
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-udp-microk8s-conf-dom2
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-tcp-microk8s-conf-dom2
+  namespace: ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: tfs-ingress-class-dom2
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "false"
+spec:
+  controller: tfs.etsi.org/controller-class-dom2
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nginx-ingress-microk8s-controller-dom2
+  namespace: ingress
+  labels:
+    microk8s-application: nginx-ingress-microk8s-dom2
+spec:
+  selector:
+    matchLabels:
+      name: nginx-ingress-microk8s-dom2
+  updateStrategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: nginx-ingress-microk8s-dom2
+    spec:
+      terminationGracePeriodSeconds: 60
+      restartPolicy: Always
+      serviceAccountName: nginx-ingress-microk8s-serviceaccount
+      containers:
+      - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0
+        imagePullPolicy: IfNotPresent
+        name: nginx-ingress-microk8s
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        lifecycle:
+          preStop:
+            exec:
+              command:
+                - /wait-shutdown
+        securityContext:
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - ALL
+          runAsUser: 101 # www-data
+        env:
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.namespace
+        ports:
+        - name: http
+          containerPort: 80
+          hostPort: 8002
+          protocol: TCP
+        - name: https
+          containerPort: 443
+          hostPort: 4432
+          protocol: TCP
+        - name: health
+          containerPort: 10254
+          hostPort: 12542
+          protocol: TCP
+        args:
+        - /nginx-ingress-controller
+        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom2
+        - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom2
+        - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom2
+        - --election-id=ingress-controller-leader-dom2
+        - --controller-class=tfs.etsi.org/controller-class-dom2
+        - --ingress-class=tfs-ingress-class-dom2
+        - ' '
+        - --publish-status-address=127.0.0.1
diff --git a/src/tests/oeccpsc22/nginx-ingress-http-dom1.yaml b/src/tests/oeccpsc22/nginx-ingress-http-dom1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b03699946e6a81e6cb8a91379f0952c2894e6578
--- /dev/null
+++ b/src/tests/oeccpsc22/nginx-ingress-http-dom1.yaml
@@ -0,0 +1,46 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-dom1
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+spec:
+  ingressClassName: tfs-ingress-class-dom1
+  rules:
+  - http:
+      paths:
+        - path: /webui(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 8004
+        - path: /grafana(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 3000
+        - path: /()(restconf/.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: computeservice
+              port:
+                number: 8080
diff --git a/src/tests/oeccpsc22/nginx-ingress-http-dom2.yaml b/src/tests/oeccpsc22/nginx-ingress-http-dom2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d07b73ee2a94d6ed54b4b5658c595e1ce635bddb
--- /dev/null
+++ b/src/tests/oeccpsc22/nginx-ingress-http-dom2.yaml
@@ -0,0 +1,53 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-dom2
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+spec:
+  ingressClassName: tfs-ingress-class-dom2
+  rules:
+  - http:
+      paths:
+        - path: /webui(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 8004
+        - path: /grafana(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 3000
+        - path: /context(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: contextservice
+              port:
+                number: 8080
+        - path: /()(restconf/.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: computeservice
+              port:
+                number: 8080
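With the per-domain ingress controllers bound to host ports 8001 (dom1) and 8002 (dom2), a quick reachability check of the WebUI routes could look like this (assuming the MicroK8s node is reachable on localhost):

    curl -sS -o /dev/null -w "%{http_code}\n" http://127.0.0.1:8001/webui/   # Domain 1
    curl -sS -o /dev/null -w "%{http_code}\n" http://127.0.0.1:8002/webui/   # Domain 2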
diff --git a/src/tests/oeccpsc22/deploy_in_kubernetes.sh b/src/tests/oeccpsc22/old/deploy_in_kubernetes.sh
similarity index 100%
rename from src/tests/oeccpsc22/deploy_in_kubernetes.sh
rename to src/tests/oeccpsc22/old/deploy_in_kubernetes.sh
diff --git a/src/tests/oeccpsc22/show_deploy.sh b/src/tests/oeccpsc22/show_deploy.sh
index d5e9346e51cb5bf6ffa442c0b3f9356176efff5c..77a8b8781701d660ad517c1714b3ec367b31dd1d 100755
--- a/src/tests/oeccpsc22/show_deploy.sh
+++ b/src/tests/oeccpsc22/show_deploy.sh
@@ -13,14 +13,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Deploy TeraFlow instance 1
-printf "TeraFlow Instance 1:\n--------------------\n"
-export K8S_NAMESPACE="oeccpsc22-1"
-kubectl --namespace $K8S_NAMESPACE get all
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Show TeraFlow Domain 1 deployment status
+printf "TeraFlow Domain 1:\n--------------------\n"
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-dom1 get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-dom1 get ingress
+printf "\n"
 
 printf "\n\n"
 
-# Deploy TeraFlow instance 2
-printf "TeraFlow Instance 2:\n--------------------\n"
-export K8S_NAMESPACE="oeccpsc22-2"
-kubectl --namespace $K8S_NAMESPACE get all
+# Show TeraFlow Domain 2 deployment status
+printf "TeraFlow Domain 2:\n--------------------\n"
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-dom2 get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-dom2 get ingress
+printf "\n"
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index 4c31b61935aca2bd7d2a5e7642168afdea6fd02d..e75461885c5eb60c61f3ff58592db1b778293b42 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -149,7 +149,7 @@
           <div class="container">
             <div class="row">
               <div class="col-md-12">
-                <p class="text-center" style="color: white;">&copy; 2021-2023</p>
+                <p class="text-center" style="color: white;">&copy; 2022-2023 <a href="https://tfs.etsi.org/">ETSI TeraFlowSDN (TFS) OSG</a></p>
               </div>
             </div>
             <div class="row">