diff --git a/.gitignore b/.gitignore
index 7e3b0cd6a26b755aeac4422f530c331d25a0cc43..0a116f850780386a9fe1010b22164f4c7dbf8228 100644
--- a/.gitignore
+++ b/.gitignore
@@ -162,6 +162,7 @@ cython_debug/
 
 # TeraFlowSDN-generated files
 tfs_runtime_env_vars.sh
+tfs_runtime_env_vars*.sh
 tfs_bchain_runtime_env_vars.sh
 delete_local_deployment.sh
 local_docker_deployment.sh
diff --git a/deploy.sh b/deploy.sh
index add41fa139a0127cb26d652f5b47decfe8658ad0..fa1dc2b3623255d2dac82cc1d982c607b9b6af5b 100755
--- a/deploy.sh
+++ b/deploy.sh
@@ -36,9 +36,13 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 # If not already set, set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
 
-# If not already set, set the neew Grafana admin password
+# If not already set, set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
 
+# If not already set, disable the skip-build flag.
+# If TFS_SKIP_BUILD is "YES", the container images are not rebuilt, re-tagged, or re-pushed; the existing ones are reused.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
+
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
@@ -67,73 +71,75 @@ echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
 for COMPONENT in $TFS_COMPONENTS; do
     echo "Processing '$COMPONENT' component..."
 
-    echo "  Building Docker image..."
-    BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
-
-    if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
-        docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
-    elif [ "$COMPONENT" == "pathcomp" ]; then
-        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
-        docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
-
-        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
-        docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
-        # next command is redundant, but helpful to keep cache updated between rebuilds
-        IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
-        docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
-    elif [ "$COMPONENT" == "dlt" ]; then
-        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
-        docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
-
-        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log"
-        docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
-    else
-        docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
-    fi
+    if [ "$TFS_SKIP_BUILD" != "YES" ]; then
+        echo "  Building Docker image..."
+        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
+
+        if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
+            docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
+        elif [ "$COMPONENT" == "pathcomp" ]; then
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
+            docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
+
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
+            docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
+            # next command is redundant, but helpful to keep cache updated between rebuilds
+            IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
+            docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+        elif [ "$COMPONENT" == "dlt" ]; then
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
+            docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
 
-    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
-        echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGE'..."
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log"
+            docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
+        else
+            docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
+        fi
 
-        if [ "$COMPONENT" == "pathcomp" ]; then
-            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        if [ -n "$TFS_REGISTRY_IMAGE" ]; then
+            echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGE'..."
 
-            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
-            docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+            if [ "$COMPONENT" == "pathcomp" ]; then
+                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log"
-            docker push "$IMAGE_URL" > "$PUSH_LOG"
+                TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
+                docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
-            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+                PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log"
+                docker push "$IMAGE_URL" > "$PUSH_LOG"
 
-            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log"
-            docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log"
-            docker push "$IMAGE_URL" > "$PUSH_LOG"
-        elif [ "$COMPONENT" == "dlt" ]; then
-            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+                TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log"
+                docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
-            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log"
-            docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+                PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log"
+                docker push "$IMAGE_URL" > "$PUSH_LOG"
+            elif [ "$COMPONENT" == "dlt" ]; then
+                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log"
-            docker push "$IMAGE_URL" > "$PUSH_LOG"
+                TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log"
+                docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
-            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+                PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log"
+                docker push "$IMAGE_URL" > "$PUSH_LOG"
 
-            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log"
-            docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log"
-            docker push "$IMAGE_URL" > "$PUSH_LOG"
-        else
-            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+                TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log"
+                docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+                PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log"
+                docker push "$IMAGE_URL" > "$PUSH_LOG"
+            else
+                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
-            docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+                TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
+                docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
-            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
-            docker push "$IMAGE_URL" > "$PUSH_LOG"
+                PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
+                docker push "$IMAGE_URL" > "$PUSH_LOG"
+            fi
         fi
     fi
 
diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml
index 04da586dfeb25a01a6f5267aa31441498ce4f2cc..5c07971a328a389473899375f2d2aad9031f473e 100644
--- a/manifests/contextservice.yaml
+++ b/manifests/contextservice.yaml
@@ -34,10 +34,10 @@ spec:
         - containerPort: 6379
         resources:
           requests:
-            cpu: 250m
-            memory: 512Mi
+            cpu: 100m
+            memory: 128Mi
           limits:
-            cpu: 700m
+            cpu: 500m
             memory: 1024Mi
       - name: server
         image: registry.gitlab.com/teraflow-h2020/controller/context:latest
@@ -64,11 +64,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:1010"]
         resources:
           requests:
-            cpu: 250m
-            memory: 512Mi
+            cpu: 50m
+            memory: 64Mi
           limits:
-            cpu: 700m
-            memory: 1024Mi
+            cpu: 500m
+            memory: 512Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
index 171394f7c43b2447e898902c78d5276fe1bcbc7c..d2595ab1915554d7ebfd786b8f39b531e40da490 100644
--- a/manifests/deviceservice.yaml
+++ b/manifests/deviceservice.yaml
@@ -43,11 +43,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:2020"]
         resources:
           requests:
-            cpu: 250m
-            memory: 512Mi
+            cpu: 50m
+            memory: 64Mi
           limits:
-            cpu: 700m
-            memory: 1024Mi
+            cpu: 500m
+            memory: 512Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml
index 5ef6eae7de6cb7c839b0cb17e65c8b3f045c1d66..d2ad4f40444faa6b9de7724f8b3df077bb7910b2 100644
--- a/manifests/dltservice.yaml
+++ b/manifests/dltservice.yaml
@@ -35,6 +35,11 @@ spec:
         env:
         - name: LOG_LEVEL
           value: "INFO"
+        ## for debug purposes
+        #- name: DLT_GATEWAY_HOST
+        #  value: "mock-blockchain.tfs-bchain.svc.cluster.local"
+        #- name: DLT_GATEWAY_PORT
+        #  value: "50051"
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:8080"]
@@ -43,14 +48,16 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:8080"]
         resources:
           requests:
-            cpu: 250m
-            memory: 512Mi
+            cpu: 50m
+            memory: 64Mi
           limits:
-            cpu: 700m
-            memory: 1024Mi
+            cpu: 500m
+            memory: 512Mi
       - name: gateway
         image: registry.gitlab.com/teraflow-h2020/controller/dlt-gateway:latest
         imagePullPolicy: Always
+        ports:
+        - containerPort: 50051
         #readinessProbe:
         #  httpGet:
         #    path: /health
@@ -65,7 +72,7 @@ spec:
         #  timeoutSeconds: 5
         resources:
           requests:
-            cpu: 250m
+            cpu: 200m
             memory: 512Mi
           limits:
             cpu: 700m
diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml
index ca30da0101659f801440af343e42851146d17bda..3ef3ffba301cadf26beaa34787dcd816e87c65a0 100644
--- a/manifests/interdomainservice.yaml
+++ b/manifests/interdomainservice.yaml
@@ -34,7 +34,7 @@ spec:
         - containerPort: 10010
         env:
         - name: LOG_LEVEL
-          value: "DEBUG"
+          value: "INFO"
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:10010"]
@@ -43,11 +43,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:10010"]
         resources:
           requests:
-            cpu: 250m
-            memory: 512Mi
+            cpu: 50m
+            memory: 64Mi
           limits:
-            cpu: 700m
-            memory: 1024Mi
+            cpu: 500m
+            memory: 512Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/mock_blockchain.yaml b/manifests/mock_blockchain.yaml
index b383d7db42be9eb3c9dc7758c230f5250eb43db1..bf9abac703b263ad6a843f0d70848dde94a4ab97 100644
--- a/manifests/mock_blockchain.yaml
+++ b/manifests/mock_blockchain.yaml
@@ -34,7 +34,7 @@ spec:
         - containerPort: 50051
         env:
         - name: LOG_LEVEL
-          value: "DEBUG"
+          value: "INFO"
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:50051"]
@@ -43,7 +43,7 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:50051"]
         resources:
           requests:
-            cpu: 250m
+            cpu: 100m
             memory: 512Mi
           limits:
             cpu: 700m
diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml
index d5939cb154443139be88d8e0ac23c281a3b18c4d..92e24ac42b7b86be6056709abd9a2cd6fc16598b 100644
--- a/manifests/pathcompservice.yaml
+++ b/manifests/pathcompservice.yaml
@@ -43,11 +43,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:10020"]
         resources:
           requests:
-            cpu: 250m
-            memory: 512Mi
+            cpu: 50m
+            memory: 64Mi
           limits:
-            cpu: 700m
-            memory: 1024Mi
+            cpu: 500m
+            memory: 512Mi
       - name: backend
         image: registry.gitlab.com/teraflow-h2020/controller/pathcomp-backend:latest
         imagePullPolicy: Always
@@ -65,8 +65,8 @@ spec:
         #  timeoutSeconds: 5
         resources:
           requests:
-            cpu: 250m
-            memory: 512Mi
+            cpu: 100m
+            memory: 256Mi
           limits:
             cpu: 700m
             memory: 1024Mi
diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml
index 75832b94fa2a6ba97617641e7b249157508614bf..a5568a5112eb08a02df2178ba45db57b57c19cc3 100644
--- a/manifests/serviceservice.yaml
+++ b/manifests/serviceservice.yaml
@@ -43,11 +43,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:3030"]
         resources:
           requests:
-            cpu: 250m
-            memory: 512Mi
+            cpu: 50m
+            memory: 64Mi
           limits:
-            cpu: 700m
-            memory: 1024Mi
+            cpu: 500m
+            memory: 512Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index 8c76618a96fa6cc7b83bc6ebf52062958a2a3689..b20669b0c03cc22857abd1534e19780025b9066a 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -43,11 +43,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:4040"]
         resources:
           requests:
-            cpu: 250m
-            memory: 512Mi
+            cpu: 50m
+            memory: 64Mi
           limits:
-            cpu: 700m
-            memory: 1024Mi
+            cpu: 500m
+            memory: 512Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index cac64a816075f1a0ad91a21c519463aa5cd8f973..7f70e837c4b6b979477a3a02db6e744b41387d73 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -38,7 +38,7 @@ spec:
         - containerPort: 8004
         env:
         - name: LOG_LEVEL
-          value: "DEBUG"
+          value: "INFO"
         - name: WEBUISERVICE_SERVICE_BASEURL_HTTP
           value: "/webui/"
         readinessProbe:
@@ -55,7 +55,7 @@ spec:
           timeoutSeconds: 1
         resources:
           requests:
-            cpu: 250m
+            cpu: 100m
             memory: 512Mi
           limits:
             cpu: 700m
diff --git a/my_deploy.sh b/my_deploy.sh
index e70a12e1556ab06f6daa89c316c6a6ed61c4e059..ffd91da35186fe21f418950493ef797a9af1b522 100644
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -18,5 +18,9 @@ export TFS_K8S_NAMESPACE="tfs"
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 
-# Set the neew Grafana admin password
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
+
+# If not already set, disable the skip-build flag.
+# If TFS_SKIP_BUILD is "YES", the container images are not rebuilt, re-tagged, or re-pushed; the existing ones are reused.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
diff --git a/nfvsdn22 b/nfvsdn22
new file mode 120000
index 0000000000000000000000000000000000000000..ac93a84be42e09c11106c5e0836bb4e51cc1fa1a
--- /dev/null
+++ b/nfvsdn22
@@ -0,0 +1 @@
+src/tests/nfvsdn22/
\ No newline at end of file
diff --git a/proto/dlt_connector.proto b/proto/dlt_connector.proto
index c8cbeb663fafb3c133092e9c49c2ece3f59d75ae..1038d6ccd40c8393313fc7f8dbfd48b1e0cf1739 100644
--- a/proto/dlt_connector.proto
+++ b/proto/dlt_connector.proto
@@ -18,14 +18,37 @@ package dlt;
 import "context.proto";
 
 service DltConnectorService {
-  rpc RecordAll (context.Empty    ) returns (context.Empty) {}
+  rpc RecordAll        (context.TopologyId) returns (context.Empty) {}
   
-  rpc RecordAllDevices (context.Empty    ) returns (context.Empty) {}
-  rpc RecordDevice     (context.DeviceId ) returns (context.Empty) {}
+  rpc RecordAllDevices (context.TopologyId) returns (context.Empty) {}
+  rpc RecordDevice     (DltDeviceId       ) returns (context.Empty) {}
 
-  rpc RecordAllServices(context.Empty    ) returns (context.Empty) {}
-  rpc RecordService    (context.ServiceId) returns (context.Empty) {}
+  rpc RecordAllLinks   (context.TopologyId) returns (context.Empty) {}
+  rpc RecordLink       (DltLinkId         ) returns (context.Empty) {}
 
-  rpc RecordAllSlices  (context.Empty    ) returns (context.Empty) {}
-  rpc RecordSlice      (context.SliceId  ) returns (context.Empty) {}
+  rpc RecordAllServices(context.TopologyId) returns (context.Empty) {}
+  rpc RecordService    (DltServiceId      ) returns (context.Empty) {}
+
+  rpc RecordAllSlices  (context.TopologyId) returns (context.Empty) {}
+  rpc RecordSlice      (DltSliceId        ) returns (context.Empty) {}
+}
+
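+// Each Dlt*Id message below couples an entity id with the TopologyId of the domain it belongs
+// to, so DLT records can be attributed to a specific domain (topology).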
+message DltDeviceId {
+  context.TopologyId  topology_id = 1;
+  context.DeviceId    device_id = 2;
+}
+
+message DltLinkId {
+  context.TopologyId  topology_id = 1;
+  context.LinkId      link_id = 2;
+}
+
+message DltServiceId {
+  context.TopologyId  topology_id = 1;
+  context.ServiceId   service_id = 2;
+}
+
+message DltSliceId {
+  context.TopologyId  topology_id = 1;
+  context.SliceId     slice_id = 2;
 }
diff --git a/scripts/show_logs_dlt_connector.sh b/scripts/show_logs_dlt_connector.sh
new file mode 100755
index 0000000000000000000000000000000000000000..db4c388c20399007ba10b357a5e153df4a86c519
--- /dev/null
+++ b/scripts/show_logs_dlt_connector.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/dltservice -c connector
diff --git a/scripts/show_logs_dlt_gateway.sh b/scripts/show_logs_dlt_gateway.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c00be2df16cb69b3ace501a854d1248a72abbf3e
--- /dev/null
+++ b/scripts/show_logs_dlt_gateway.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/dltservice -c gateway
diff --git a/src/common/Constants.py b/src/common/Constants.py
index a536ef60047eb1f210f8d98d207134d377adcbed..964d904da704324d6def548103675e815743d818 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -30,8 +30,9 @@ DEFAULT_HTTP_BIND_ADDRESS = '0.0.0.0'
 DEFAULT_METRICS_PORT = 9192
 
 # Default context and topology UUIDs
-DEFAULT_CONTEXT_UUID = 'admin'
-DEFAULT_TOPOLOGY_UUID = 'admin'
+DEFAULT_CONTEXT_UUID      = 'admin'
+DEFAULT_TOPOLOGY_UUID     = 'admin'     # contains the detailed local topology
+INTERDOMAIN_TOPOLOGY_UUID = 'inter'     # contains the abstract inter-domain topology
 
 # Default service names
 class ServiceNameEnum(Enum):
@@ -50,7 +51,7 @@ class ServiceNameEnum(Enum):
     WEBUI         = 'webui'
 
     # Used for test and debugging only
-    DLT_GATEWAY   = 'dlt-gateway'
+    DLT_GATEWAY   = 'dltgateway'
 
 # Default gRPC service ports
 DEFAULT_SERVICE_GRPC_PORTS = {
diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py
index 98c96d6831ca7381c70975fd60335e8cecfc6e1b..c353708995cd5d8e4a7e2fde8d9bdd03732008eb 100644
--- a/src/common/DeviceTypes.py
+++ b/src/common/DeviceTypes.py
@@ -16,6 +16,9 @@ from enum import Enum
 
 class DeviceTypeEnum(Enum):
 
+    # Abstractions
+    NETWORK                         = 'network'
+
     # Emulated device types
     EMULATED_DATACENTER             = 'emu-datacenter'
     EMULATED_MICROWAVE_RADIO_SYSTEM = 'emu-microwave-radio-system'
diff --git a/src/common/tests/MockServicerImpl_DltGateway.py b/src/common/tests/MockServicerImpl_DltGateway.py
index 2d750168238b2a041badd1974f27e57f62363d90..f106519b2695cda519e95a79e7b559dd24818108 100644
--- a/src/common/tests/MockServicerImpl_DltGateway.py
+++ b/src/common/tests/MockServicerImpl_DltGateway.py
@@ -36,6 +36,10 @@ class AlreadyExistsException(Exception):
 class DoesNotExistException(Exception):
     pass
 
+MSG_NOT_EXISTS = 'RecordId({:s}, {:s}, {:s}) Does Not Exist'
+MSG_ALREADY_EXISTS = 'RecordId({:s}, {:s}, {:s}) Already Exists'
+MSG_OPERATION_NOT_IMPLEMENTED = 'DltRecordOperationEnum({:s}) Not Implemented'
+
 class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
     def __init__(self):
         LOGGER.info('[__init__] Creating Servicer...')
@@ -43,16 +47,12 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
         self.msg_broker = MockMessageBroker()
         LOGGER.info('[__init__] Servicer Created')
 
-    def __get_record(self, record_id : DltRecordId, should_exist : bool) -> Optional[Dict]:
+    def __get_record(self, record_id : DltRecordId) -> Optional[Dict]:
         domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid
         str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '')
         records_domain : Dict[str, Dict] = self.records.setdefault(domain_uuid, {})
         records_type   : Dict[str, Dict] = records_domain.setdefault(str_type, {})
         record         : Optional[Dict] = records_type.get(record_uuid)
-        if should_exist and record is None:
-            raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
-        elif not should_exist and record is not None:
-            raise AlreadyExistsException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
         return record
 
-    def __set_record(self, record_id : DltRecordId, should_exist : bool, data_json : str) -> None:
+    def __set_record(self, record_id : DltRecordId, should_exist : bool, data_json : Dict) -> None:
@@ -62,10 +62,10 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
         records_type   : Dict[str, Dict] = records_domain.setdefault(str_type, {})
         record         : Optional[Dict] = records_type.get(record_uuid)
         if should_exist and record is None:
-            raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
+            raise DoesNotExistException(MSG_NOT_EXISTS.format(domain_uuid, str_type, record_uuid))
         elif not should_exist and record is not None:
-            raise AlreadyExistsException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
-        records_type[record_uuid] = json.loads(data_json)
+            raise AlreadyExistsException(MSG_ALREADY_EXISTS.format(domain_uuid, str_type, record_uuid))
+        records_type[record_uuid] = data_json
 
     def __del_record(self, record_id : DltRecordId) -> None:
         domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid
@@ -74,7 +74,7 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
         records_type   : Dict[str, Dict] = records_domain.setdefault(str_type, {})
         record         : Optional[Dict] = records_type.get(record_uuid)
         if record is None:
-            raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
+            raise DoesNotExistException(MSG_NOT_EXISTS.format(domain_uuid, str_type, record_uuid))
-        records_type.discard(record_uuid)
+        records_type.pop(record_uuid, None)  # dict has no discard(); pop() removes the entry
 
     def __publish(self, operation : DltRecordOperationEnum, record_id : DltRecordId) -> None:
@@ -99,14 +99,14 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
         try:
             operation : DltRecordOperationEnum = request.operation
             if operation == DLTRECORDOPERATION_ADD:
-                self.__set_record(record_id, False, request.data_json)
+                self.__set_record(record_id, False, json.loads(request.data_json))
             elif operation == DLTRECORDOPERATION_UPDATE:
-                self.__set_record(record_id, True, request.data_json)
+                self.__set_record(record_id, True, json.loads(request.data_json))
             elif operation == DLTRECORDOPERATION_DELETE:
                 self.__del_record(record_id)
             else:
                 str_operation = DltRecordOperationEnum.Name(operation).upper().replace('DLTRECORDOPERATION_', '')
-                raise NotImplementedError('DltRecordOperationEnum({:s})'.format(str_operation))
+                raise NotImplementedError(MSG_OPERATION_NOT_IMPLEMENTED.format(str_operation))
             self.__publish(operation, record_id)
             response.status = DLTRECORDSTATUS_SUCCEEDED
         except Exception as e: # pylint: disable=broad-except
@@ -117,11 +117,12 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
 
     def GetFromDlt(self, request : DltRecordId, context : grpc.ServicerContext) -> DltRecord:
         LOGGER.info('[GetFromDlt] request={:s}'.format(grpc_message_to_json_string(request)))
-        record = self.__get_record(request, True)
+        record = self.__get_record(request)
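+        # record is None when the requested record does not exist; in that case an empty
+        # DltRecord (all fields unset) is returned instead of raising an exception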
         response = DltRecord()
-        response.record_id.CopyFrom(request) # pylint: disable=no-member
-        response.operation = DLTRECORDOPERATION_UNDEFINED
-        response.data_json = json.dumps(record, sort_keys=True)
+        if record is not None:
+            response.record_id.CopyFrom(request) # pylint: disable=no-member
+            response.operation = DLTRECORDOPERATION_UNDEFINED
+            response.data_json = json.dumps(record, sort_keys=True)
         LOGGER.info('[GetFromDlt] response={:s}'.format(grpc_message_to_json_string(response)))
         return response
 
diff --git a/src/common/tools/context_queries/CheckType.py b/src/common/tools/context_queries/CheckType.py
new file mode 100644
index 0000000000000000000000000000000000000000..f53ad16906336182311d1d98fec428f1472bf748
--- /dev/null
+++ b/src/common/tools/context_queries/CheckType.py
@@ -0,0 +1,28 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union
+from common.DeviceTypes import DeviceTypeEnum
+
+def device_type_is_datacenter(device_type : Union[str, DeviceTypeEnum]) -> bool:
+    return device_type in {
+        DeviceTypeEnum.DATACENTER, DeviceTypeEnum.DATACENTER.value,
+        DeviceTypeEnum.EMULATED_DATACENTER, DeviceTypeEnum.EMULATED_DATACENTER.value
+    }
+
+def device_type_is_network(device_type : Union[str, DeviceTypeEnum]) -> bool:
+    return device_type in {DeviceTypeEnum.NETWORK, DeviceTypeEnum.NETWORK.value}
+
+def endpoint_type_is_border(endpoint_type : str) -> bool:
+    return str(endpoint_type).endswith('/border')
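+
+# Illustrative checks ('copper/border' is just an example endpoint type):
+#   device_type_is_network(DeviceTypeEnum.NETWORK)  # -> True
+#   endpoint_type_is_border('copper/border')        # -> True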
diff --git a/src/common/tools/context_queries/Context.py b/src/common/tools/context_queries/Context.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf0d3be2b7c1890e486492ad55add19a17591353
--- /dev/null
+++ b/src/common/tools/context_queries/Context.py
@@ -0,0 +1,25 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import Context, Empty
+from common.tools.object_factory.Context import json_context
+from context.client.ContextClient import ContextClient
+
+def create_context(
+    context_client : ContextClient, context_uuid : str
+) -> None:
+    existing_context_ids = context_client.ListContextIds(Empty())
+    existing_context_uuids = {context_id.context_uuid.uuid for context_id in existing_context_ids.context_ids}
+    if context_uuid in existing_context_uuids: return
+    context_client.SetContext(Context(**json_context(context_uuid)))
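+
+# Illustrative usage (assumes a reachable Context component; DEFAULT_CONTEXT_UUID comes from
+# common.Constants):
+#   create_context(ContextClient(), DEFAULT_CONTEXT_UUID)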
diff --git a/src/common/tools/context_queries/Device.py b/src/common/tools/context_queries/Device.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5b205d46185e12fa51a2cbd8146342abe5bed38
--- /dev/null
+++ b/src/common/tools/context_queries/Device.py
@@ -0,0 +1,59 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Set
+from common.proto.context_pb2 import ContextId, Device, Empty, Topology, TopologyId
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+
+def get_existing_device_uuids(context_client : ContextClient) -> Set[str]:
+    existing_device_ids = context_client.ListDeviceIds(Empty())
+    existing_device_uuids = {device_id.device_uuid.uuid for device_id in existing_device_ids.device_ids}
+    return existing_device_uuids
+
+def add_device_to_topology(
+    context_client : ContextClient, context_id : ContextId, topology_uuid : str, device_uuid : str
+) -> bool:
+    topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id))
+    topology_ro = context_client.GetTopology(topology_id)
+    device_uuids = {device_id.device_uuid.uuid for device_id in topology_ro.device_ids}
+    if device_uuid in device_uuids: return False # already existed
+
+    topology_rw = Topology()
+    topology_rw.CopyFrom(topology_ro)
+    topology_rw.device_ids.add().device_uuid.uuid = device_uuid # pylint: disable=no-member
+    context_client.SetTopology(topology_rw)
+    return True
+
+def get_uuids_of_devices_in_topology(
+    context_client : ContextClient, context_id : ContextId, topology_uuid : str
+) -> List[str]:
+    topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id))
+    topology = context_client.GetTopology(topology_id)
+    device_uuids = [device_id.device_uuid.uuid for device_id in topology.device_ids]
+    return device_uuids
+
+def get_devices_in_topology(
+    context_client : ContextClient, context_id : ContextId, topology_uuid : str
+) -> List[Device]:
+    device_uuids = get_uuids_of_devices_in_topology(context_client, context_id, topology_uuid)
+
+    all_devices = context_client.ListDevices(Empty())
+    devices_in_topology = list()
+    for device in all_devices.devices:
+        device_uuid = device.device_id.device_uuid.uuid
+        if device_uuid not in device_uuids: continue
+        devices_in_topology.append(device)
+
+    return devices_in_topology
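+
+# Note: get_devices_in_topology() retrieves all devices and filters them client-side against the
+# topology's device_ids, since the Topology object only carries the device uuids.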
diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py
new file mode 100644
index 0000000000000000000000000000000000000000..c47db248e61485e314703a43ce3cd535409cdea7
--- /dev/null
+++ b/src/common/tools/context_queries/InterDomain.py
@@ -0,0 +1,251 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Dict, List, Set, Tuple
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import ContextId, Device, Empty, EndPointId, ServiceTypeEnum, Slice, TopologyId
+from common.proto.pathcomp_pb2 import PathCompRequest
+from common.tools.context_queries.CheckType import device_type_is_network
+from common.tools.context_queries.Device import get_devices_in_topology, get_uuids_of_devices_in_topology
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from pathcomp.frontend.client.PathCompClient import PathCompClient
+
+LOGGER = logging.getLogger(__name__)
+
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+DATACENTER_DEVICE_TYPES = {DeviceTypeEnum.DATACENTER, DeviceTypeEnum.EMULATED_DATACENTER}
+
+def get_local_device_uuids(context_client : ContextClient) -> Set[str]:
+    topologies = context_client.ListTopologies(ADMIN_CONTEXT_ID)
+    topologies = {topology.topology_id.topology_uuid.uuid : topology for topology in topologies.topologies}
+    LOGGER.info('[get_local_device_uuids] topologies.keys()={:s}'.format(str(topologies.keys())))
+
+    local_topology_uuids = set(topologies.keys())
+    local_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID)
+    LOGGER.info('[get_local_device_uuids] local_topology_uuids={:s}'.format(str(local_topology_uuids)))
+
+    local_device_uuids = set()
+
+    # add the topology names except DEFAULT_TOPOLOGY_UUID and INTERDOMAIN_TOPOLOGY_UUID; each remaining
+    # topology is abstracted as a local device in the inter-domain topology, and its name is used as the
+    # abstract device name
+    for local_topology_uuid in local_topology_uuids:
+        if local_topology_uuid == DEFAULT_TOPOLOGY_UUID: continue
+        local_device_uuids.add(local_topology_uuid)
+
+    # add physical devices in the local topologies
+    for local_topology_uuid in local_topology_uuids:
+        topology_device_ids = topologies[local_topology_uuid].device_ids
+        topology_device_uuids = {device_id.device_uuid.uuid for device_id in topology_device_ids}
+        LOGGER.info('[get_local_device_uuids] [loop] local_topology_uuid={:s} topology_device_uuids={:s}'.format(
+            str(local_topology_uuid), str(topology_device_uuids)))
+        local_device_uuids.update(topology_device_uuids)
+
+    LOGGER.info('[get_local_device_uuids] local_device_uuids={:s}'.format(str(local_device_uuids)))
+    return local_device_uuids
+
+def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]:
+    interdomain_topology_id = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_UUID, context_id=ADMIN_CONTEXT_ID))
+    interdomain_topology = context_client.GetTopology(interdomain_topology_id)
+
+    # add abstracted devices in the interdomain topology
+    interdomain_device_ids = interdomain_topology.device_ids
+    interdomain_device_uuids = {device_id.device_uuid.uuid for device_id in interdomain_device_ids}
+    LOGGER.info('[get_interdomain_device_uuids] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids)))
+    return interdomain_device_uuids
+
+def get_local_domain_devices(context_client : ContextClient) -> List[Device]:
+    local_device_uuids = get_local_device_uuids(context_client)
+    all_devices = context_client.ListDevices(Empty())
+    local_domain_devices = list()
+    for device in all_devices.devices:
+        if not device_type_is_network(device.device_type): continue
+        device_uuid = device.device_id.device_uuid.uuid
+        if device_uuid not in local_device_uuids: continue
+        local_domain_devices.append(device)
+    return local_domain_devices
+
+def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool:
+    interdomain_device_uuids = get_interdomain_device_uuids(context_client)
+    LOGGER.info('[is_inter_domain] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids)))
+    non_interdomain_endpoint_ids = [
+        endpoint_id
+        for endpoint_id in endpoint_ids
+        if endpoint_id.device_id.device_uuid.uuid not in interdomain_device_uuids
+    ]
+    str_non_interdomain_endpoint_ids = [
+        (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid)
+        for endpoint_id in non_interdomain_endpoint_ids
+    ]
+    LOGGER.info('[is_inter_domain] non_interdomain_endpoint_ids={:s}'.format(str(str_non_interdomain_endpoint_ids)))
+    is_inter_domain_ = len(non_interdomain_endpoint_ids) == 0
+    LOGGER.info('[is_inter_domain] is_inter_domain={:s}'.format(str(is_inter_domain_)))
+    return is_inter_domain_
+
+def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool:
+    local_device_uuids = get_local_device_uuids(context_client)
+    LOGGER.info('[is_multi_domain] local_device_uuids={:s}'.format(str(local_device_uuids)))
+    remote_endpoint_ids = [
+        endpoint_id
+        for endpoint_id in endpoint_ids
+        if endpoint_id.device_id.device_uuid.uuid not in local_device_uuids
+    ]
+    str_remote_endpoint_ids = [
+        (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid)
+        for endpoint_id in remote_endpoint_ids
+    ]
+    LOGGER.info('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids)))
+    is_multi_domain_ = len(remote_endpoint_ids) > 0
+    LOGGER.info('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_)))
+    return is_multi_domain_
+
+def compute_interdomain_path(
+    pathcomp_client : PathCompClient, slice_ : Slice
+) -> List[Tuple[str, List[EndPointId]]]:
+    context_uuid = slice_.slice_id.context_id.context_uuid.uuid
+    slice_uuid = slice_.slice_id.slice_uuid.uuid
+
+    pathcomp_req = PathCompRequest()
+    pathcomp_req.shortest_path.Clear()                                          # pylint: disable=no-member
+    pathcomp_req_svc = pathcomp_req.services.add()                              # pylint: disable=no-member
+    pathcomp_req_svc.service_id.context_id.context_uuid.uuid = context_uuid
+    pathcomp_req_svc.service_id.service_uuid.uuid = slice_uuid
+    pathcomp_req_svc.service_type = ServiceTypeEnum.SERVICETYPE_L2NM
+
+    for endpoint_id in slice_.slice_endpoint_ids:
+        service_endpoint_id = pathcomp_req_svc.service_endpoint_ids.add()
+        service_endpoint_id.CopyFrom(endpoint_id)
+
+    constraint_bw = pathcomp_req_svc.service_constraints.add()
+    constraint_bw.custom.constraint_type = 'bandwidth[gbps]'
+    constraint_bw.custom.constraint_value = '10.0'
+
+    constraint_lat = pathcomp_req_svc.service_constraints.add()
+    constraint_lat.custom.constraint_type = 'latency[ms]'
+    constraint_lat.custom.constraint_value = '100.0'
+
+    LOGGER.info('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req)))
+    pathcomp_rep = pathcomp_client.Compute(pathcomp_req)
+    LOGGER.info('pathcomp_rep = {:s}'.format(grpc_message_to_json_string(pathcomp_rep)))
+
+    service = next(iter([
+        service
+        for service in pathcomp_rep.services
+        if service.service_id == pathcomp_req_svc.service_id
+    ]), None)
+    if service is None:
+        str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id)
+        raise Exception('Service({:s}) not found'.format(str_service_id))
+
+    connection = next(iter([
+        connection
+        for connection in pathcomp_rep.connections
+        if connection.service_id == pathcomp_req_svc.service_id
+    ]), None)
+    if connection is None:
+        str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id)
+        raise Exception('Connection for Service({:s}) not found'.format(str_service_id))
+
+    domain_list : List[str] = list()
+    domain_to_endpoint_ids : Dict[str, List[EndPointId]] = dict()
+    for endpoint_id in connection.path_hops_endpoint_ids:
+        device_uuid = endpoint_id.device_id.device_uuid.uuid
+        #endpoint_uuid = endpoint_id.endpoint_uuid.uuid
+        if device_uuid not in domain_to_endpoint_ids: domain_list.append(device_uuid)
+        domain_to_endpoint_ids.setdefault(device_uuid, []).append(endpoint_id)
+
+    return [
+        (domain_uuid, domain_to_endpoint_ids.get(domain_uuid))
+        for domain_uuid in domain_list
+    ]
+
+def get_device_to_domain_map(context_client : ContextClient) -> Dict[str, str]:
+    devices_to_domains : Dict[str, str] = dict()
+    contexts = context_client.ListContexts(Empty())
+    for context in contexts.contexts:
+        context_id = context.context_id
+        context_uuid = context_id.context_uuid.uuid
+        topologies = context_client.ListTopologies(context_id)
+        if context_uuid == DEFAULT_CONTEXT_UUID:
+            for topology in topologies.topologies:
+                topology_id = topology.topology_id
+                topology_uuid = topology_id.topology_uuid.uuid
+                if topology_uuid in {DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID}: continue
+
+                # add the topology names except DEFAULT_TOPOLOGY_UUID and INTERDOMAIN_TOPOLOGY_UUID; each
+                # remaining topology is abstracted as a local device in the inter-domain topology, and its
+                # name is used as the abstract device name
+                devices_to_domains[topology_uuid] = topology_uuid
+
+                # add physical devices in the local topology
+                for device_id in topology.device_ids:
+                    device_uuid = device_id.device_uuid.uuid
+                    devices_to_domains[device_uuid] = topology_uuid
+        else:
+            # for each topology in a remote context
+            for topology in topologies.topologies:
+                topology_id = topology.topology_id
+                topology_uuid = topology_id.topology_uuid.uuid
+
+                # skip the inter-domain topology; only concrete remote topologies are mapped
+                if topology_uuid in {INTERDOMAIN_TOPOLOGY_UUID}: continue
+
+                # map each device to the remote domain (identified by its context uuid)
+                for device_id in topology.device_ids:
+                    device_uuid = device_id.device_uuid.uuid
+                    devices_to_domains[device_uuid] = context_uuid
+
+    return devices_to_domains
+
+def compute_traversed_domains(
+    context_client : ContextClient, interdomain_path : List[Tuple[str, List[EndPointId]]]
+) -> List[Tuple[str, bool, List[EndPointId]]]:
+
+    local_device_uuids = get_local_device_uuids(context_client)
+    LOGGER.info('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids)))
+
+    interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID)
+    interdomain_devices = {
+        device.device_id.device_uuid.uuid : device
+        for device in interdomain_devices
+    }
+
+    devices_to_domains = get_device_to_domain_map(context_client)
+    LOGGER.info('[compute_traversed_domains] devices_to_domains={:s}'.format(str(devices_to_domains)))
+
+    traversed_domains : List[Tuple[str, bool, List[EndPointId]]] = list()
+    domains_dict : Dict[str, Tuple[str, bool, List[EndPointId]]] = dict()
+    for device_uuid, endpoint_ids in interdomain_path:
+        domain_uuid = devices_to_domains.get(device_uuid, '---')
+        domain = domains_dict.get(domain_uuid)
+        if domain is None:
+            is_local_domain = domain_uuid in local_device_uuids
+            domain = (domain_uuid, is_local_domain, [])
+            traversed_domains.append(domain)
+            domains_dict[domain_uuid] = domain
+        domain[2].extend(endpoint_ids)
+
+    str_traversed_domains = [
+        (domain_uuid, is_local_domain, [
+            (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid)
+            for endpoint_id in endpoint_ids
+        ])
+        for domain_uuid,is_local_domain,endpoint_ids in traversed_domains
+    ]
+    LOGGER.info('[compute_traversed_domains] traversed_domains={:s}'.format(str(str_traversed_domains)))
+    return traversed_domains
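+
+# Typical flow (illustrative): compute_interdomain_path() returns the abstract end-to-end path as
+# [(device_uuid, [endpoint_ids])] hops; compute_traversed_domains() then groups those hops into
+# (domain_uuid, is_local_domain, [endpoint_ids]) tuples using get_device_to_domain_map().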
diff --git a/src/common/tools/context_queries/Link.py b/src/common/tools/context_queries/Link.py
new file mode 100644
index 0000000000000000000000000000000000000000..abc5fa91af8d24c8a3cdf18fda0e7680da9143a7
--- /dev/null
+++ b/src/common/tools/context_queries/Link.py
@@ -0,0 +1,59 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Set
+from common.proto.context_pb2 import ContextId, Empty, Link, Topology, TopologyId
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+
+def get_existing_link_uuids(context_client : ContextClient) -> Set[str]:
+    existing_link_ids = context_client.ListLinkIds(Empty())
+    existing_link_uuids = {link_id.link_uuid.uuid for link_id in existing_link_ids.link_ids}
+    return existing_link_uuids
+
+def add_link_to_topology(
+    context_client : ContextClient, context_id : ContextId, topology_uuid : str, link_uuid : str
+) -> bool:
+    topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id))
+    topology_ro = context_client.GetTopology(topology_id)
+    link_uuids = {link_id.link_uuid.uuid for link_id in topology_ro.link_ids}
+    if link_uuid in link_uuids: return False # already existed
+
+    topology_rw = Topology()
+    topology_rw.CopyFrom(topology_ro)
+    topology_rw.link_ids.add().link_uuid.uuid = link_uuid # pylint: disable=no-member
+    context_client.SetTopology(topology_rw)
+    return True
+
+def get_uuids_of_links_in_topology(
+    context_client : ContextClient, context_id : ContextId, topology_uuid : str
+) -> List[str]:
+    topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id))
+    topology = context_client.GetTopology(topology_id)
+    link_uuids = [link_id.link_uuid.uuid for link_id in topology.link_ids]
+    return link_uuids
+
+def get_links_in_topology(
+    context_client : ContextClient, context_id : ContextId, topology_uuid : str
+) -> List[Link]:
+    link_uuids = get_uuids_of_links_in_topology(context_client, context_id, topology_uuid)
+
+    all_links = context_client.ListLinks(Empty())
+    links_in_topology = list()
+    for link in all_links.links:
+        link_uuid = link.link_id.link_uuid.uuid
+        if link_uuid not in link_uuids: continue
+        links_in_topology.append(link)
+
+    return links_in_topology
diff --git a/src/common/tools/context_queries/Topology.py b/src/common/tools/context_queries/Topology.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcf1b96bb51571a71ab35fb743f8154f02e2d200
--- /dev/null
+++ b/src/common/tools/context_queries/Topology.py
@@ -0,0 +1,41 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List
+from common.proto.context_pb2 import ContextId, Topology
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology
+from context.client.ContextClient import ContextClient
+
+def create_topology(
+    context_client : ContextClient, context_uuid : str, topology_uuid : str
+) -> None:
+    context_id = ContextId(**json_context_id(context_uuid))
+    existing_topology_ids = context_client.ListTopologyIds(context_id)
+    existing_topology_uuids = {topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids}
+    if topology_uuid in existing_topology_uuids: return
+    context_client.SetTopology(Topology(**json_topology(topology_uuid, context_id=context_id)))
+
+def create_missing_topologies(
+    context_client : ContextClient, context_id : ContextId, topology_uuids : List[str]
+) -> None:
+    # Find existing topologies within own context
+    existing_topology_ids = context_client.ListTopologyIds(context_id)
+    existing_topology_uuids = {topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids}
+
+    # Create topologies within provided context
+    for topology_uuid in topology_uuids:
+        if topology_uuid in existing_topology_uuids: continue
+        grpc_topology = Topology(**json_topology(topology_uuid, context_id=context_id))
+        context_client.SetTopology(grpc_topology)
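+
+# Illustrative usage (constants from common.Constants): ensure the default and inter-domain
+# topologies exist in the admin context:
+#   admin_context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+#   create_missing_topologies(context_client, admin_context_id, [DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID])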
diff --git a/src/common/tools/context_queries/__init__.py b/src/common/tools/context_queries/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/common/tools/context_queries/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/common/tools/grpc/Constraints.py b/src/common/tools/grpc/Constraints.py
index a9dd4f40cbd823752b8cc09936ac48ebe32ec1a5..aa95767ab2807e4ac7ac331c47622a8ece0e88ff 100644
--- a/src/common/tools/grpc/Constraints.py
+++ b/src/common/tools/grpc/Constraints.py
@@ -21,7 +21,33 @@ from typing import Any, Dict, Optional, Tuple
 from common.proto.context_pb2 import Constraint, EndPointId
 from common.tools.grpc.Tools import grpc_message_to_json_string
 
-def update_constraint_custom(constraints, constraint_type : str, fields : Dict[str, Tuple[Any, bool]]) -> Constraint:
+def update_constraint_custom_scalar(
+    constraints, constraint_type : str, value : Any, raise_if_differs : bool = False
+) -> Constraint:
+
+    for constraint in constraints:
+        if constraint.WhichOneof('constraint') != 'custom': continue
+        if constraint.custom.constraint_type != constraint_type: continue
+        json_constraint_value = json.loads(constraint.custom.constraint_value)
+        break   # found, end loop
+    else:
+        # not found, add it
+        constraint = constraints.add()      # pylint: disable=no-member
+        constraint.custom.constraint_type = constraint_type
+        json_constraint_value = None
+
+    if (json_constraint_value is None) or not raise_if_differs:
+        # missing or raise_if_differs=False, add/update it
+        json_constraint_value = value
+    elif json_constraint_value != value:
+        # exists, differs, and raise_if_differs=True
+        msg = 'Specified value({:s}) differs from existing value({:s})'
+        raise Exception(msg.format(str(value), str(json_constraint_value)))
+
+    constraint.custom.constraint_value = json.dumps(json_constraint_value, sort_keys=True)
+    return constraint
+
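+# Example (hypothetical constraint type): update_constraint_custom_scalar(constraints,
+# 'bandwidth[gbps]', 10.0) stores constraint_value='10.0'; dict-valued constraints such
+# as {'latency[ms]': 5} go through update_constraint_custom_dict below.
+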
+def update_constraint_custom_dict(constraints, constraint_type : str, fields : Dict[str, Tuple[Any, bool]]) -> Constraint:
     # fields: Dict[field_name : str, Tuple[field_value : Any, raise_if_differs : bool]]
 
     for constraint in constraints:
@@ -45,6 +71,7 @@ def update_constraint_custom(constraints, constraint_type : str, fields : Dict[s
             raise Exception(msg.format(str(field_name), str(field_value), str(json_constraint_value[field_name])))
 
     constraint.custom.constraint_value = json.dumps(json_constraint_value, sort_keys=True)
+    return constraint
 
 def update_constraint_endpoint_location(
     constraints, endpoint_id : EndPointId,
@@ -129,10 +156,18 @@ def copy_constraints(source_constraints, target_constraints):
         if constraint_kind == 'custom':
             custom = source_constraint.custom
             constraint_type = custom.constraint_type
-            constraint_value = json.loads(custom.constraint_value)
-            raise_if_differs = True
-            fields = {name:(value, raise_if_differs) for name,value in constraint_value.items()}
-            update_constraint_custom(target_constraints, constraint_type, fields)
+            try:
+                constraint_value = json.loads(custom.constraint_value)
+            except: # pylint: disable=bare-except
+                constraint_value = custom.constraint_value
+            if isinstance(constraint_value, dict):
+                raise_if_differs = True
+                fields = {name:(value, raise_if_differs) for name,value in constraint_value.items()}
+                update_constraint_custom_dict(target_constraints, constraint_type, fields)
+            else:
+                raise_if_differs = True
+                update_constraint_custom_scalar(
+                    target_constraints, constraint_type, constraint_value, raise_if_differs=raise_if_differs)
 
         elif constraint_kind == 'endpoint_location':
             endpoint_id = source_constraint.endpoint_location.endpoint_id
diff --git a/src/common/tools/object_factory/Slice.py b/src/common/tools/object_factory/Slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ab666aa6ed379eb0b8948b1178aa13069d70bf4
--- /dev/null
+++ b/src/common/tools/object_factory/Slice.py
@@ -0,0 +1,48 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+from typing import Dict, List, Optional
+from common.proto.context_pb2 import SliceStatusEnum
+
+def get_slice_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str:
+    return 'slc:{:s}/{:s}=={:s}/{:s}'.format(
+        a_endpoint_id['device_id']['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'],
+        z_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid'])
+
+def json_slice_id(slice_uuid : str, context_id : Optional[Dict] = None) -> Dict:
+    result = {'slice_uuid': {'uuid': slice_uuid}}
+    if context_id is not None: result['context_id'] = copy.deepcopy(context_id)
+    return result
+
+def json_slice_owner(owner_uuid : str, owner_string : str) -> Dict:
+    return {'owner_uuid': {'uuid': owner_uuid}, 'owner_string': owner_string}
+
+def json_slice(
+    slice_uuid : str, context_id : Optional[Dict] = None,
+    status : SliceStatusEnum = SliceStatusEnum.SLICESTATUS_PLANNED, endpoint_ids : List[Dict] = [],
+    constraints : List[Dict] = [], config_rules : List[Dict] = [], service_ids : List[Dict] = [],
+    subslice_ids : List[Dict] = [], owner : Optional[Dict] = None) -> Dict:
+
+    result = {
+        'slice_id'          : json_slice_id(slice_uuid, context_id=context_id),
+        'slice_status'      : {'slice_status': status},
+        'slice_endpoint_ids': copy.deepcopy(endpoint_ids),
+        'slice_constraints' : copy.deepcopy(constraints),
+        'slice_config'      : {'config_rules': copy.deepcopy(config_rules)},
+        'slice_service_ids' : copy.deepcopy(service_ids),
+        'slice_subslice_ids': copy.deepcopy(subslice_ids),
+    }
+    if owner is not None: result['slice_owner'] = owner
+    return result
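+
+# Usage sketch (hypothetical endpoint dicts):
+#   a_ep = {'device_id': {'device_uuid': {'uuid': 'dev-a'}}, 'endpoint_uuid': {'uuid': 'eth0'}}
+#   z_ep = {'device_id': {'device_uuid': {'uuid': 'dev-z'}}, 'endpoint_uuid': {'uuid': 'eth0'}}
+#   slice_descriptor = json_slice(get_slice_uuid(a_ep, z_ep), endpoint_ids=[a_ep, z_ep])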
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index 3cc823a2aa7a06de6cb591ef6d668ba7eeef5cbd..8aa410e9a2136f8f0c6df94a9d17ea376fcfc516 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -22,7 +22,7 @@ from werkzeug.exceptions import UnsupportedMediaType
 from common.proto.context_pb2 import Slice
 from common.tools.grpc.ConfigRules import update_config_rule_custom
 from common.tools.grpc.Constraints import (
-    update_constraint_custom, update_constraint_endpoint_location, update_constraint_endpoint_priority,
+    update_constraint_custom_dict, update_constraint_endpoint_location, update_constraint_endpoint_priority,
     update_constraint_sla_availability)
 from common.tools.grpc.EndPointIds import update_endpoint_ids
 from common.tools.grpc.Tools import grpc_message_to_json_string
@@ -99,7 +99,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s
     update_config_rule_custom(config_rules, endpoint_settings_key, field_updates)
 
     if len(diversity_constraints) > 0:
-        update_constraint_custom(constraints, 'diversity', diversity_constraints)
+        update_constraint_custom_dict(constraints, 'diversity', diversity_constraints)
 
     update_constraint_endpoint_location(constraints, endpoint_id, region=site_id)
     if access_priority is not None: update_constraint_endpoint_priority(constraints, endpoint_id, access_priority)
diff --git a/src/context/client/EventsCollector.py b/src/context/client/EventsCollector.py
index 9715098bd3cd979d78a83b4839e40613d3997d1e..f5fc3fbc735c2f62b39223b9ed20aa3730ecd11d 100644
--- a/src/context/client/EventsCollector.py
+++ b/src/context/client/EventsCollector.py
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging, queue, threading
+from typing import Callable
+import grpc, logging, queue, threading, time
 from common.proto.context_pb2 import Empty
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
@@ -20,6 +21,41 @@ from context.client.ContextClient import ContextClient
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
+class _Collector(threading.Thread):
+    def __init__(
+        self, subscription_func : Callable, events_queue : queue.Queue,
+        terminate : threading.Event, log_events_received : bool = False
+    ) -> None:
+        super().__init__(daemon=False)
+        self._subscription_func = subscription_func
+        self._events_queue = events_queue
+        self._terminate = terminate
+        self._log_events_received = log_events_received
+        self._stream = None
+
+    def cancel(self) -> None:
+        if self._stream is None: return
+        self._stream.cancel()
+
+    def run(self) -> None:
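+        # Keep the subscription alive until the terminate event is set: re-subscribe
+        # after transient UNAVAILABLE errors; CANCELLED (set by cancel()) ends the loop.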
+        while not self._terminate.is_set():
+            self._stream = self._subscription_func()
+            try:
+                for event in self._stream:
+                    if self._log_events_received:
+                        str_event = grpc_message_to_json_string(event)
+                        LOGGER.info('[_collect] event: {:s}'.format(str_event))
+                    self._events_queue.put_nowait(event)
+            except grpc.RpcError as e:
+                if e.code() == grpc.StatusCode.UNAVAILABLE:
+                    LOGGER.info('[_collect] UNAVAILABLE... retrying...')
+                    time.sleep(0.5)
+                    continue
+                elif e.code() == grpc.StatusCode.CANCELLED:
+                    break
+                else:
+                    raise # pragma: no cover
+
 class EventsCollector:
     def __init__(
         self, context_client          : ContextClient,
@@ -31,60 +67,49 @@ class EventsCollector:
         activate_service_collector    : bool = True,
         activate_slice_collector      : bool = True,
         activate_connection_collector : bool = True,
-
     ) -> None:
         self._events_queue = queue.Queue()
+        self._terminate = threading.Event()
         self._log_events_received = log_events_received
 
-        self._context_stream, self._context_thread = None, None
-        if activate_context_collector:
-            self._context_stream = context_client.GetContextEvents(Empty())
-            self._context_thread = self._create_collector_thread(self._context_stream)
-
-        self._topology_stream, self._topology_thread = None, None
-        if activate_topology_collector:
-            self._topology_stream = context_client.GetTopologyEvents(Empty())
-            self._topology_thread = self._create_collector_thread(self._topology_stream)
-
-        self._device_stream, self._device_thread = None, None
-        if activate_device_collector:
-            self._device_stream = context_client.GetDeviceEvents(Empty())
-            self._device_thread = self._create_collector_thread(self._device_stream)
-
-        self._link_stream, self._link_thread = None, None
-        if activate_link_collector:
-            self._link_stream = context_client.GetLinkEvents(Empty())
-            self._link_thread = self._create_collector_thread(self._link_stream)
-
-        self._service_stream, self._service_thread = None, None
-        if activate_service_collector:
-            self._service_stream = context_client.GetServiceEvents(Empty())
-            self._service_thread = self._create_collector_thread(self._service_stream)
-
-        self._slice_stream, self._slice_thread = None, None
-        if activate_slice_collector:
-            self._slice_stream = context_client.GetSliceEvents(Empty())
-            self._slice_thread = self._create_collector_thread(self._slice_stream)
-
-        self._connection_stream, self._connection_thread = None, None
-        if activate_connection_collector:
-            self._connection_stream = context_client.GetConnectionEvents(Empty())
-            self._connection_thread = self._create_collector_thread(self._connection_stream)
-
-    def _create_collector_thread(self, stream, as_daemon : bool = False):
-        return threading.Thread(target=self._collect, args=(stream,), daemon=as_daemon)
-
-    def _collect(self, events_stream) -> None:
-        try:
-            for event in events_stream:
-                if self._log_events_received:
-                    LOGGER.info('[_collect] event: {:s}'.format(grpc_message_to_json_string(event)))
-                self._events_queue.put_nowait(event)
-        except grpc.RpcError as e:
-            if e.code() != grpc.StatusCode.CANCELLED: # pylint: disable=no-member
-                raise # pragma: no cover
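+        # One _Collector thread per activated event stream; all of them feed the
+        # shared events queue and observe the same terminate event.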
+        self._context_thread = _Collector(
+                lambda: context_client.GetContextEvents(Empty()),
+                self._events_queue, self._terminate, self._log_events_received
+            ) if activate_context_collector else None
+
+        self._topology_thread = _Collector(
+                lambda: context_client.GetTopologyEvents(Empty()),
+                self._events_queue, self._terminate, self._log_events_received
+            ) if activate_topology_collector else None
+
+        self._device_thread = _Collector(
+                lambda: context_client.GetDeviceEvents(Empty()),
+                self._events_queue, self._terminate, self._log_events_received
+            ) if activate_device_collector else None
+
+        self._link_thread = _Collector(
+                lambda: context_client.GetLinkEvents(Empty()),
+                self._events_queue, self._terminate, self._log_events_received
+            ) if activate_link_collector else None
+
+        self._service_thread = _Collector(
+                lambda: context_client.GetServiceEvents(Empty()),
+                self._events_queue, self._terminate, self._log_events_received
+            ) if activate_service_collector else None
+
+        self._slice_thread = _Collector(
+                lambda: context_client.GetSliceEvents(Empty()),
+                self._events_queue, self._terminate, self._log_events_received
+            ) if activate_slice_collector else None
+
+        self._connection_thread = _Collector(
+                lambda: context_client.GetConnectionEvents(Empty()),
+                self._events_queue, self._terminate, self._log_events_received
+            ) if activate_connection_collector else None
 
     def start(self):
+        self._terminate.clear()
+
         if self._context_thread    is not None: self._context_thread.start()
         if self._topology_thread   is not None: self._topology_thread.start()
         if self._device_thread     is not None: self._device_thread.start()
@@ -102,25 +127,28 @@ class EventsCollector:
     def get_events(self, block : bool = True, timeout : float = 0.1, count : int = None):
         events = []
         if count is None:
-            while True:
+            while not self._terminate.is_set():
                 event = self.get_event(block=block, timeout=timeout)
                 if event is None: break
                 events.append(event)
         else:
             for _ in range(count):
+                if self._terminate.is_set(): break
                 event = self.get_event(block=block, timeout=timeout)
                 if event is None: continue
                 events.append(event)
         return sorted(events, key=lambda e: e.event.timestamp.timestamp)
 
     def stop(self):
-        if self._context_stream    is not None: self._context_stream.cancel()
-        if self._topology_stream   is not None: self._topology_stream.cancel()
-        if self._device_stream     is not None: self._device_stream.cancel()
-        if self._link_stream       is not None: self._link_stream.cancel()
-        if self._service_stream    is not None: self._service_stream.cancel()
-        if self._slice_stream      is not None: self._slice_stream.cancel()
-        if self._connection_stream is not None: self._connection_stream.cancel()
+        self._terminate.set()
+
+        if self._context_thread    is not None: self._context_thread.cancel()
+        if self._topology_thread   is not None: self._topology_thread.cancel()
+        if self._device_thread     is not None: self._device_thread.cancel()
+        if self._link_thread       is not None: self._link_thread.cancel()
+        if self._service_thread    is not None: self._service_thread.cancel()
+        if self._slice_thread      is not None: self._slice_thread.cancel()
+        if self._connection_thread is not None: self._connection_thread.cancel()
 
         if self._context_thread    is not None: self._context_thread.join()
         if self._topology_thread   is not None: self._topology_thread.join()
diff --git a/src/context/service/database/SliceModel.py b/src/context/service/database/SliceModel.py
index bc00ada43758c9c5ffefbb88a87134aa46fbd73a..74bb60b401f656fdcfec8b0466019f87a8f1b41e 100644
--- a/src/context/service/database/SliceModel.py
+++ b/src/context/service/database/SliceModel.py
@@ -46,6 +46,8 @@ class SliceModel(Model):
     slice_constraints_fk = ForeignKeyField(ConstraintsModel)
     slice_status = EnumeratedField(ORM_SliceStatusEnum, required=True)
     slice_config_fk = ForeignKeyField(ConfigModel)
+    slice_owner_uuid = StringField(required=False, allow_empty=True)
+    slice_owner_string = StringField(required=False, allow_empty=True)
 
     def delete(self) -> None:
         # pylint: disable=import-outside-toplevel
@@ -91,7 +93,11 @@ class SliceModel(Model):
     def dump_subslice_ids(self) -> List[Dict]:
         from .RelationModels import SliceSubSliceModel # pylint: disable=import-outside-toplevel
         db_subslices = get_related_objects(self, SliceSubSliceModel, 'sub_slice_fk')
-        return [db_subslice.dump_id() for db_subslice in sorted(db_subslices, key=operator.attrgetter('pk'))]
+        return [
+            db_subslice.dump_id()
+            for db_subslice in sorted(db_subslices, key=operator.attrgetter('pk'))
+            if db_subslice.pk != self.pk # if I am a subslice of another slice, skip listing myself as my own subslice
+        ]
 
     def dump(   # pylint: disable=arguments-differ
             self, include_endpoint_ids=True, include_constraints=True, include_config_rules=True,
@@ -106,4 +112,11 @@ class SliceModel(Model):
         if include_config_rules: result.setdefault('slice_config', {})['config_rules'] = self.dump_config()
         if include_service_ids: result['slice_service_ids'] = self.dump_service_ids()
         if include_subslice_ids: result['slice_subslice_ids'] = self.dump_subslice_ids()
+
+        if len(self.slice_owner_uuid) > 0:
+            result.setdefault('slice_owner', {}).setdefault('owner_uuid', {})['uuid'] = self.slice_owner_uuid
+
+        if len(self.slice_owner_string) > 0:
+            result.setdefault('slice_owner', {})['owner_string'] = self.slice_owner_string
+
         return result
diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py
index 88f7bd8af82009f1fc45bace87776d9cbc6d6543..ec12795bd8948ad93bfd759d222ef18b960bc0e3 100644
--- a/src/context/service/grpc_server/ContextServiceServicerImpl.py
+++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py
@@ -606,6 +606,8 @@ class ContextServiceServicerImpl(ContextServiceServicer):
                 'slice_constraints_fk': db_constraints,
                 'slice_status'        : grpc_to_enum__slice_status(request.slice_status.slice_status),
                 'slice_config_fk'     : db_running_config,
+                'slice_owner_uuid'    : request.slice_owner.owner_uuid.uuid,
+                'slice_owner_string'  : request.slice_owner.owner_string,
             })
             db_slice, updated = result
 
@@ -622,7 +624,7 @@ class ContextServiceServicerImpl(ContextServiceServicer):
 
                 db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key)
 
-                str_slice_endpoint_key = key_to_str([slice_uuid, str_endpoint_key], separator='--')
+                str_slice_endpoint_key = key_to_str([str_slice_key, str_endpoint_key], separator='--')
                 result : Tuple[SliceEndPointModel, bool] = get_or_create_object(
                     self.database, SliceEndPointModel, str_slice_endpoint_key, {
                         'slice_fk': db_slice, 'endpoint_fk': db_endpoint})
diff --git a/src/dlt/connector/Config.py b/src/dlt/connector/Config.py
index 9953c820575d42fa88351cc8de022d880ba96e6a..bdf9f306959e86160012541e8a72cc9aabb019c0 100644
--- a/src/dlt/connector/Config.py
+++ b/src/dlt/connector/Config.py
@@ -11,3 +11,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+import os
+
+DEFAULT_DLT_GATEWAY_HOST = '127.0.0.1'
+DEFAULT_DLT_GATEWAY_PORT = '50051'
+
+# Find IP:port of gateway container as follows:
+# - first check env vars DLT_GATEWAY_HOST & DLT_GATEWAY_PORT
+# - if not set, use DEFAULT_DLT_GATEWAY_HOST & DEFAULT_DLT_GATEWAY_PORT
+DLT_GATEWAY_HOST = str(os.environ.get('DLT_GATEWAY_HOST', DEFAULT_DLT_GATEWAY_HOST))
+DLT_GATEWAY_PORT = int(os.environ.get('DLT_GATEWAY_PORT', DEFAULT_DLT_GATEWAY_PORT))
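+
+# Example override (hypothetical values), e.g. when the gateway is exposed as a NodePort:
+#   export DLT_GATEWAY_HOST=10.0.2.10
+#   export DLT_GATEWAY_PORT=30551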
diff --git a/src/dlt/connector/Dockerfile b/src/dlt/connector/Dockerfile
index 51e9ec506f0c8a6c35ceac68833e3ad683ef8e63..c5d600ee0d55deb5a8bd4dca2d4f12cd092ad420 100644
--- a/src/dlt/connector/Dockerfile
+++ b/src/dlt/connector/Dockerfile
@@ -64,6 +64,8 @@ RUN python3 -m pip install -r requirements.txt
 WORKDIR /var/teraflow
 COPY src/context/. context/
 COPY src/dlt/connector/. dlt/connector
+COPY src/interdomain/. interdomain/
+COPY src/slice/. slice/
 
 # Start the service
 ENTRYPOINT ["python", "-m", "dlt.connector.service"]
diff --git a/src/dlt/connector/client/DltConnectorClient.py b/src/dlt/connector/client/DltConnectorClient.py
index f48562996b067ca81a99b6ceb7288029be7ba1c8..1ca511d0434dd72458982bf7c7d55d8bbd1859f1 100644
--- a/src/dlt/connector/client/DltConnectorClient.py
+++ b/src/dlt/connector/client/DltConnectorClient.py
@@ -15,7 +15,8 @@
 import grpc, logging
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_host, get_service_port_grpc
-from common.proto.context_pb2 import DeviceId, Empty, ServiceId, SliceId
+from common.proto.context_pb2 import Empty, TopologyId
+from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId
 from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceStub
 from common.tools.client.RetryDecorator import retry, delay_exponential
 from common.tools.grpc.Tools import grpc_message_to_json_string
@@ -46,49 +47,63 @@ class DltConnectorClient:
         self.stub = None
 
     @RETRY_DECORATOR
-    def RecordAll(self, request : Empty) -> Empty:
+    def RecordAll(self, request : TopologyId) -> Empty:
         LOGGER.debug('RecordAll request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.RecordAll(request)
         LOGGER.debug('RecordAll result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
-    def RecordAllDevices(self, request : Empty) -> Empty:
+    def RecordAllDevices(self, request : TopologyId) -> Empty:
         LOGGER.debug('RecordAllDevices request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.RecordAllDevices(request)
         LOGGER.debug('RecordAllDevices result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
-    def RecordDevice(self, request : DeviceId) -> Empty:
+    def RecordDevice(self, request : DltDeviceId) -> Empty:
         LOGGER.debug('RecordDevice request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.RecordDevice(request)
         LOGGER.debug('RecordDevice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
-    def RecordAllServices(self, request : Empty) -> Empty:
+    def RecordAllLinks(self, request : TopologyId) -> Empty:
+        LOGGER.debug('RecordAllLinks request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RecordAllLinks(request)
+        LOGGER.debug('RecordAllLinks result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RecordLink(self, request : DltLinkId) -> Empty:
+        LOGGER.debug('RecordLink request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RecordLink(request)
+        LOGGER.debug('RecordLink result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RecordAllServices(self, request : TopologyId) -> Empty:
         LOGGER.debug('RecordAllServices request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.RecordAllServices(request)
         LOGGER.debug('RecordAllServices result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
-    def RecordService(self, request : ServiceId) -> Empty:
+    def RecordService(self, request : DltServiceId) -> Empty:
         LOGGER.debug('RecordService request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.RecordService(request)
         LOGGER.debug('RecordService result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
-    def RecordAllSlices(self, request : Empty) -> Empty:
+    def RecordAllSlices(self, request : TopologyId) -> Empty:
         LOGGER.debug('RecordAllSlices request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.RecordAllSlices(request)
         LOGGER.debug('RecordAllSlices result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
-    def RecordSlice(self, request : SliceId) -> Empty:
+    def RecordSlice(self, request : DltSliceId) -> Empty:
         LOGGER.debug('RecordSlice request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.RecordSlice(request)
         LOGGER.debug('RecordSlice result: {:s}'.format(grpc_message_to_json_string(response)))
diff --git a/src/dlt/connector/client/DltEventsCollector.py b/src/dlt/connector/client/DltEventsCollector.py
index 6fe2474cead37094c507a8a612181dc7f7243544..d022ac0f0144eecfcdb706665a8bde81fa54492f 100644
--- a/src/dlt/connector/client/DltEventsCollector.py
+++ b/src/dlt/connector/client/DltEventsCollector.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging, queue, threading
+import grpc, logging, queue, threading, time
 from common.proto.dlt_gateway_pb2 import DltRecordSubscription
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from dlt.connector.client.DltGatewayClient import DltGatewayClient
@@ -20,32 +20,36 @@ from dlt.connector.client.DltGatewayClient import DltGatewayClient
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-class DltEventsCollector:
+class DltEventsCollector(threading.Thread):
     def __init__(
         self, dltgateway_client : DltGatewayClient,
         log_events_received     : bool = False,
     ) -> None:
-        self._events_queue = queue.Queue()
+        super().__init__(name='DltEventsCollector', daemon=True)
+        self._dltgateway_client = dltgateway_client
         self._log_events_received = log_events_received
-        subscription = DltRecordSubscription() # bu default subscribe to all
-        self._dltgateway_stream = dltgateway_client.SubscribeToDlt(subscription)
-        self._dltgateway_thread = self._create_collector_thread(self._dltgateway_stream)
-
-    def _create_collector_thread(self, stream, as_daemon : bool = False):
-        return threading.Thread(target=self._collect, args=(stream,), daemon=as_daemon)
-
-    def _collect(self, events_stream) -> None:
-        try:
-            for event in events_stream:
-                if self._log_events_received:
-                    LOGGER.info('[_collect] event: {:s}'.format(grpc_message_to_json_string(event)))
-                self._events_queue.put_nowait(event)
-        except grpc.RpcError as e:
-            if e.code() != grpc.StatusCode.CANCELLED: # pylint: disable=no-member
-                raise # pragma: no cover
+        self._events_queue = queue.Queue()
+        self._terminate = threading.Event()
+        self._dltgateway_stream = None
 
-    def start(self):
-        if self._dltgateway_thread is not None: self._dltgateway_thread.start()
+    def run(self) -> None:
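+        # Same resilience pattern as the context EventsCollector: re-subscribe on
+        # UNAVAILABLE, exit on CANCELLED (raised when stop() cancels the stream).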
+        self._terminate.clear()
+        while not self._terminate.is_set():
+            try:
+                subscription = DltRecordSubscription() # by default, subscribe to all records
+                self._dltgateway_stream = self._dltgateway_client.SubscribeToDlt(subscription)
+                for event in self._dltgateway_stream:
+                    if self._log_events_received:
+                        LOGGER.info('[_collect] event: {:s}'.format(grpc_message_to_json_string(event)))
+                    self._events_queue.put_nowait(event)
+            except grpc.RpcError as e:
+                if e.code() == grpc.StatusCode.UNAVAILABLE: # pylint: disable=no-member
+                    time.sleep(0.5)
+                    continue
+                elif e.code() == grpc.StatusCode.CANCELLED: # pylint: disable=no-member
+                    break
+                else:
+                    raise # pragma: no cover
 
     def get_event(self, block : bool = True, timeout : float = 0.1):
         try:
@@ -68,5 +72,5 @@ class DltEventsCollector:
         return sorted(events, key=lambda e: e.event.timestamp.timestamp)
 
     def stop(self):
+        self._terminate.set()
         if self._dltgateway_stream is not None: self._dltgateway_stream.cancel()
-        if self._dltgateway_thread is not None: self._dltgateway_thread.join()
diff --git a/src/dlt/connector/client/DltGatewayClient.py b/src/dlt/connector/client/DltGatewayClient.py
index f1f8dec391bb836cea33422176730d250090429d..e2f5530f9a971d0a25cac042d361c52db5c16304 100644
--- a/src/dlt/connector/client/DltGatewayClient.py
+++ b/src/dlt/connector/client/DltGatewayClient.py
@@ -14,14 +14,13 @@
 
 from typing import Iterator
 import grpc, logging
-from common.Constants import ServiceNameEnum
-from common.Settings import get_service_host, get_service_port_grpc
 from common.proto.context_pb2 import Empty, TeraFlowController
 from common.proto.dlt_gateway_pb2 import (
     DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordStatus, DltRecordSubscription)
 from common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceStub
 from common.tools.client.RetryDecorator import retry, delay_exponential
 from common.tools.grpc.Tools import grpc_message_to_json_string
+from dlt.connector.Config import DLT_GATEWAY_HOST, DLT_GATEWAY_PORT
 
 LOGGER = logging.getLogger(__name__)
 MAX_RETRIES = 15
@@ -30,8 +29,8 @@ RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION,
 
 class DltGatewayClient:
     def __init__(self, host=None, port=None):
-        if not host: host = get_service_host(ServiceNameEnum.DLT)
-        if not port: port = get_service_port_grpc(ServiceNameEnum.DLT)
+        if not host: host = DLT_GATEWAY_HOST
+        if not port: port = DLT_GATEWAY_PORT
         self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
         LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint))
         self.channel = None
diff --git a/src/dlt/connector/main_test.py b/src/dlt/connector/main_test.py
index 4ad90eb35444b7ba4de00159372e466e8fc68905..a877a5ce39a29dd8bf37416868d9c5a701912259 100644
--- a/src/dlt/connector/main_test.py
+++ b/src/dlt/connector/main_test.py
@@ -1,8 +1,11 @@
 # pip install grpcio==1.47.0 grpcio-tools==1.47.0 protobuf==3.20.1
+# PYTHONPATH=./src python
 # PYTHONPATH=/home/cttc/teraflow/src python -m dlt.connector.main_test
 
 import logging, sys, time
-from common.proto.dlt_gateway_pb2 import DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UPDATE, DLTRECORDTYPE_DEVICE, DltRecord
+from common.proto.dlt_gateway_pb2 import (
+    DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UNDEFINED, DLTRECORDOPERATION_UPDATE, DLTRECORDTYPE_DEVICE,
+    DLTRECORDTYPE_UNDEFINED, DltRecord, DltRecordId)
 from common.tools.object_factory.Device import json_device
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from src.common.proto.context_pb2 import DEVICEOPERATIONALSTATUS_ENABLED, Device
@@ -12,13 +15,33 @@ from .client.DltEventsCollector import DltEventsCollector
 logging.basicConfig(level=logging.INFO)
 LOGGER = logging.getLogger(__name__)
 
+DLT_GATEWAY_HOST = '127.0.0.1'
+DLT_GATEWAY_PORT = 30551 #50051
+
+def record_found(record : DltRecord) -> bool:
+    found = True
+    found = found and (len(record.record_id.domain_uuid.uuid) > 0)
+    found = found and (record.record_id.type != DLTRECORDTYPE_UNDEFINED)
+    found = found and (len(record.record_id.record_uuid.uuid) > 0)
+    #found = found and (record.operation != DLTRECORDOPERATION_UNDEFINED)
+    found = found and (len(record.data_json) > 0)
+    return found
+
 def main():
-    dltgateway_client = DltGatewayClient(host='127.0.0.1', port=50051)
+    dltgateway_client = DltGatewayClient(host=DLT_GATEWAY_HOST, port=DLT_GATEWAY_PORT)
     dltgateway_collector = DltEventsCollector(dltgateway_client, log_events_received=True)
     dltgateway_collector.start()
 
     time.sleep(3)
 
+    # Check record exists
+    dri = DltRecordId()
+    dri.domain_uuid.uuid = 'non-existing-domain'
+    dri.record_uuid.uuid = 'non-existing-record'
+    dri.type = DLTRECORDTYPE_DEVICE
+    reply = dltgateway_client.GetFromDlt(dri)
+    assert not record_found(reply), 'Record should not exist'
+
     device = Device(**json_device('dev-1', 'packet-router', DEVICEOPERATIONALSTATUS_ENABLED))
 
     r2dlt_req = DltRecord()
diff --git a/src/dlt/connector/service/DltConnector.py b/src/dlt/connector/service/DltConnector.py
deleted file mode 100644
index 0c42d66852e8eb895a07c761f7535a0d768a9e91..0000000000000000000000000000000000000000
--- a/src/dlt/connector/service/DltConnector.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, threading
-from common.tools.grpc.Tools import grpc_message_to_json_string
-from context.client.ContextClient import ContextClient
-from context.client.EventsCollector import EventsCollector
-from dlt.connector.client.DltConnectorClient import DltConnectorClient
-
-LOGGER = logging.getLogger(__name__)
-
-class DltConnector:
-    def __init__(self) -> None:
-        LOGGER.debug('Creating connector...')
-        self._terminate = threading.Event()
-        self._thread = None
-        LOGGER.debug('Connector created')
-
-    def start(self):
-        self._terminate.clear()
-        self._thread = threading.Thread(target=self._run_events_collector)
-        self._thread.start()
-
-    def _run_events_collector(self) -> None:
-        dltconnector_client = DltConnectorClient()
-        context_client = ContextClient()
-        events_collector = EventsCollector(context_client)
-        events_collector.start()
-
-        while not self._terminate.is_set():
-            event = events_collector.get_event()
-            LOGGER.info('Event from Context Received: {:s}'.format(grpc_message_to_json_string(event)))
-
-        events_collector.stop()
-        context_client.close()
-        dltconnector_client.close()
-
-    def stop(self):
-        self._terminate.set()
-        self._thread.join()
diff --git a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py
index 860e46f3ab88b097f4aa8e06508b19518055e46f..6c5401cb1724f8a759001d790e835ab78ce4c6c6 100644
--- a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py
+++ b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py
@@ -13,9 +13,15 @@
 # limitations under the License.
 
 import grpc, logging
-from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
-from common.proto.context_pb2 import DeviceId, Empty, ServiceId, SliceId
+from common.proto.context_pb2 import DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId
+from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId
 from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceServicer
+from common.proto.dlt_gateway_pb2 import DltRecord, DltRecordId, DltRecordOperationEnum, DltRecordTypeEnum
+from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from dlt.connector.client.DltGatewayClient import DltGatewayClient
+from .tools.Checkers import record_exists
 
 LOGGER = logging.getLogger(__name__)
 
@@ -23,6 +29,7 @@ SERVICE_NAME = 'DltConnector'
 METHOD_NAMES = [
     'RecordAll',
     'RecordAllDevices', 'RecordDevice',
+    'RecordAllLinks', 'RecordLink',
     'RecordAllServices', 'RecordService',
     'RecordAllSlices', 'RecordSlice',
 ]
@@ -34,29 +41,121 @@ class DltConnectorServiceServicerImpl(DltConnectorServiceServicer):
         LOGGER.debug('Servicer Created')
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RecordAll(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+    def RecordAll(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RecordAllDevices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RecordDevice(self, request : DltDeviceId, context : grpc.ServicerContext) -> Empty:
+        context_client = ContextClient()
+        device = context_client.GetDevice(request.device_id)
+
+        dltgateway_client = DltGatewayClient()
+
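+        # Get-then-record pattern: look up the record in the DLT first and choose
+        # ADD or UPDATE accordingly; links and slices below follow the same flow.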
+        dlt_record_id = DltRecordId()
+        dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid
+        dlt_record_id.type             = DltRecordTypeEnum.DLTRECORDTYPE_DEVICE
+        dlt_record_id.record_uuid.uuid = device.device_id.device_uuid.uuid
+
+        LOGGER.info('[RecordDevice] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id)))
+        dlt_record = dltgateway_client.GetFromDlt(dlt_record_id)
+        LOGGER.info('[RecordDevice] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record)))
+
+        exists = record_exists(dlt_record)
+        LOGGER.info('[RecordDevice] exists = {:s}'.format(str(exists)))
+
+        dlt_record = DltRecord()
+        dlt_record.record_id.CopyFrom(dlt_record_id)
+        dlt_record.operation = \
+            DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \
+            if exists else \
+            DltRecordOperationEnum.DLTRECORDOPERATION_ADD
+
+        dlt_record.data_json = grpc_message_to_json_string(device)
+        LOGGER.info('[RecordDevice] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record)))
+        dlt_record_status = dltgateway_client.RecordToDlt(dlt_record)
+        LOGGER.info('[RecordDevice] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status)))
         return Empty()
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RecordAllDevices(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+    def RecordAllLinks(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
         return Empty()
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RecordDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty:
+    def RecordLink(self, request : DltLinkId, context : grpc.ServicerContext) -> Empty:
+        context_client = ContextClient()
+        link = context_client.GetLink(request.link_id)
+
+        dltgateway_client = DltGatewayClient()
+
+        dlt_record_id = DltRecordId()
+        dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid
+        dlt_record_id.type             = DltRecordTypeEnum.DLTRECORDTYPE_LINK
+        dlt_record_id.record_uuid.uuid = link.link_id.link_uuid.uuid
+
+        LOGGER.info('[RecordLink] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id)))
+        dlt_record = dltgateway_client.GetFromDlt(dlt_record_id)
+        LOGGER.info('[RecordLink] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record)))
+
+        exists = record_exists(dlt_record)
+        LOGGER.info('[RecordLink] exists = {:s}'.format(str(exists)))
+
+        dlt_record = DltRecord()
+        dlt_record.record_id.CopyFrom(dlt_record_id)
+        dlt_record.operation = \
+            DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \
+            if exists else \
+            DltRecordOperationEnum.DLTRECORDOPERATION_ADD
+
+        dlt_record.data_json = grpc_message_to_json_string(link)
+        LOGGER.info('[RecordLink] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record)))
+        dlt_record_status = dltgateway_client.RecordToDlt(dlt_record)
+        LOGGER.info('[RecordLink] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status)))
         return Empty()
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RecordAllServices(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+    def RecordAllServices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
         return Empty()
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RecordService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty:
+    def RecordService(self, request : DltServiceId, context : grpc.ServicerContext) -> Empty:
         return Empty()
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RecordAllSlices(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+    def RecordAllSlices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
         return Empty()
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RecordSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty:
+    def RecordSlice(self, request : DltSliceId, context : grpc.ServicerContext) -> Empty:
+        context_client = ContextClient()
+        slice_ = context_client.GetSlice(request.slice_id)
+
+        dltgateway_client = DltGatewayClient()
+
+        dlt_record_id = DltRecordId()
+        dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid
+        dlt_record_id.type             = DltRecordTypeEnum.DLTRECORDTYPE_SLICE
+        dlt_record_id.record_uuid.uuid = slice_.slice_id.slice_uuid.uuid
+
+        LOGGER.info('[RecordSlice] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id)))
+        dlt_record = dltgateway_client.GetFromDlt(dlt_record_id)
+        LOGGER.info('[RecordSlice] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record)))
+
+        exists = record_exists(dlt_record)
+        LOGGER.info('[RecordSlice] exists = {:s}'.format(str(exists)))
+
+        dlt_record = DltRecord()
+        dlt_record.record_id.CopyFrom(dlt_record_id)
+        dlt_record.operation = \
+            DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \
+            if exists else \
+            DltRecordOperationEnum.DLTRECORDOPERATION_ADD
+
+        dlt_record.data_json = grpc_message_to_json_string(slice_)
+        LOGGER.info('[RecordSlice] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record)))
+        dlt_record_status = dltgateway_client.RecordToDlt(dlt_record)
+        LOGGER.info('[RecordSlice] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status)))
         return Empty()
diff --git a/src/dlt/connector/service/__main__.py b/src/dlt/connector/service/__main__.py
index 435a93f61bf934a17d9c044756648176e9cb2d2d..76e7bc6f1bb1b50e736327d8f08c0880e45c6835 100644
--- a/src/dlt/connector/service/__main__.py
+++ b/src/dlt/connector/service/__main__.py
@@ -18,6 +18,7 @@ from common.Constants import ServiceNameEnum
 from common.Settings import (
     ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
     wait_for_environment_variables)
+from .event_dispatcher.DltEventDispatcher import DltEventDispatcher
 from .DltConnectorService import DltConnectorService
 
 terminate = threading.Event()
@@ -31,7 +32,7 @@ def main():
     global LOGGER # pylint: disable=global-statement
 
     log_level = get_log_level()
-    logging.basicConfig(level=log_level)
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
     LOGGER = logging.getLogger(__name__)
 
     wait_for_environment_variables([
@@ -48,6 +49,10 @@ def main():
     metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
+    # Starting DLT event dispatcher
+    event_dispatcher = DltEventDispatcher()
+    event_dispatcher.start()
+
     # Starting DLT connector service
     grpc_service = DltConnectorService()
     grpc_service.start()
@@ -57,6 +62,8 @@ def main():
 
     LOGGER.info('Terminating...')
     grpc_service.stop()
+    event_dispatcher.stop()
+    event_dispatcher.join()
 
     LOGGER.info('Bye')
     return 0
diff --git a/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py b/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..8973ae621c1291f8ed6e2673f0c64b59712143ee
--- /dev/null
+++ b/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py
@@ -0,0 +1,209 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, json, logging, threading
+from typing import Set
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.proto.context_pb2 import ContextId, Device, EventTypeEnum, Link, Slice, TopologyId
+from common.proto.dlt_connector_pb2 import DltSliceId
+from common.proto.dlt_gateway_pb2 import DltRecordEvent, DltRecordOperationEnum, DltRecordTypeEnum
+from common.tools.context_queries.Context import create_context
+from common.tools.context_queries.Device import add_device_to_topology
+from common.tools.context_queries.Link import add_link_to_topology
+from common.tools.context_queries.Topology import create_topology
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from dlt.connector.client.DltConnectorClient import DltConnectorClient
+from dlt.connector.client.DltEventsCollector import DltEventsCollector
+from dlt.connector.client.DltGatewayClient import DltGatewayClient
+from interdomain.client.InterdomainClient import InterdomainClient
+
+LOGGER = logging.getLogger(__name__)
+
+GET_EVENT_TIMEOUT = 0.5
+
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+
+class Clients:
+    def __init__(self) -> None:
+        self.context_client = ContextClient()
+        self.dlt_connector_client = DltConnectorClient()
+        self.dlt_gateway_client = DltGatewayClient()
+        self.interdomain_client = InterdomainClient()
+
+    def close(self) -> None:
+        self.interdomain_client.close()
+        self.dlt_gateway_client.close()
+        self.dlt_connector_client.close()
+        self.context_client.close()
+
+class DltEventDispatcher(threading.Thread):
+    def __init__(self) -> None:
+        LOGGER.debug('Creating dispatcher...')
+        super().__init__(name='DltEventDispatcher', daemon=True)
+        self._terminate = threading.Event()
+        LOGGER.debug('Dispatcher created')
+
+    def start(self) -> None:
+        self._terminate.clear()
+        return super().start()
+
+    def stop(self):
+        self._terminate.set()
+
+    def run(self) -> None:
+        clients = Clients()
+        create_context(clients.context_client, DEFAULT_CONTEXT_UUID)
+        create_topology(clients.context_client, DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID)
+        create_topology(clients.context_client, DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID)
+
+        dlt_events_collector = DltEventsCollector(clients.dlt_gateway_client, log_events_received=True)
+        dlt_events_collector.start()
+
+        while not self._terminate.is_set():
+            event = dlt_events_collector.get_event(block=True, timeout=GET_EVENT_TIMEOUT)
+            if event is None: continue
+
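+            # Topologies under the admin context, other than the default and
+            # interdomain ones, identify the domains managed locally.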
+            existing_topology_ids = clients.context_client.ListTopologyIds(ADMIN_CONTEXT_ID)
+            local_domain_uuids = {
+                topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids
+            }
+            local_domain_uuids.discard(DEFAULT_TOPOLOGY_UUID)
+            local_domain_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID)
+
+            self.dispatch_event(clients, local_domain_uuids, event)
+
+        dlt_events_collector.stop()
+        clients.close()
+
+    def dispatch_event(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None:
+        record_type : DltRecordTypeEnum = event.record_id.type # {UNDEFINED/CONTEXT/TOPOLOGY/DEVICE/LINK/SERVICE/SLICE}
+        if record_type == DltRecordTypeEnum.DLTRECORDTYPE_DEVICE:
+            self._dispatch_device(clients, local_domain_uuids, event)
+        elif record_type == DltRecordTypeEnum.DLTRECORDTYPE_LINK:
+            self._dispatch_link(clients, local_domain_uuids, event)
+        elif record_type == DltRecordTypeEnum.DLTRECORDTYPE_SLICE:
+            self._dispatch_slice(clients, local_domain_uuids, event)
+        else:
+            raise NotImplementedError('EventType: {:s}'.format(grpc_message_to_json_string(event)))
+
+    def _dispatch_device(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None:
+        domain_uuid : str = event.record_id.domain_uuid.uuid
+
+        if domain_uuid in local_domain_uuids:
+            MSG = '[_dispatch_device] Ignoring DLT event received (local): {:s}'
+            LOGGER.info(MSG.format(grpc_message_to_json_string(event)))
+            return
+
+        MSG = '[_dispatch_device] DLT event received (remote): {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(event)))
+
+        event_type : EventTypeEnum = event.event.event_type # {UNDEFINED/CREATE/UPDATE/REMOVE}
+        if event_type in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}:
+            LOGGER.info('[_dispatch_device] event.record_id={:s}'.format(grpc_message_to_json_string(event.record_id)))
+            record = clients.dlt_gateway_client.GetFromDlt(event.record_id)
+            LOGGER.info('[_dispatch_device] record={:s}'.format(grpc_message_to_json_string(record)))
+
+            create_context(clients.context_client, domain_uuid)
+            create_topology(clients.context_client, domain_uuid, DEFAULT_TOPOLOGY_UUID)
+            device = Device(**json.loads(record.data_json))
+            clients.context_client.SetDevice(device)
+            device_uuid = device.device_id.device_uuid.uuid # pylint: disable=no-member
+            add_device_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID, device_uuid)
+            domain_context_id = ContextId(**json_context_id(domain_uuid))
+            add_device_to_topology(clients.context_client, domain_context_id, DEFAULT_TOPOLOGY_UUID, device_uuid)
+        elif event_type in {EventTypeEnum.EVENTTYPE_DELETE}:
+            raise NotImplementedError('Delete Device')
+
+    def _dispatch_link(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None:
+        domain_uuid : str = event.record_id.domain_uuid.uuid
+
+        if domain_uuid in local_domain_uuids:
+            MSG = '[_dispatch_link] Ignoring DLT event received (local): {:s}'
+            LOGGER.info(MSG.format(grpc_message_to_json_string(event)))
+            return
+
+        MSG = '[_dispatch_link] DLT event received (remote): {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(event)))
+
+        event_type : EventTypeEnum = event.event.event_type # {UNDEFINED/CREATE/UPDATE/REMOVE}
+        if event_type in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}:
+            LOGGER.info('[_dispatch_link] event.record_id={:s}'.format(grpc_message_to_json_string(event.record_id)))
+            record = clients.dlt_gateway_client.GetFromDlt(event.record_id)
+            LOGGER.info('[_dispatch_link] record={:s}'.format(grpc_message_to_json_string(record)))
+
+            link = Link(**json.loads(record.data_json))
+            clients.context_client.SetLink(link)
+            link_uuid = link.link_id.link_uuid.uuid # pylint: disable=no-member
+            add_link_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID, link_uuid)
+        elif event_type in {EventTypeEnum.EVENTTYPE_DELETE}:
+            raise NotImplementedError('Delete Link')
+
+    def _dispatch_slice(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None:
+        event_type  : EventTypeEnum = event.event.event_type # {UNDEFINED/CREATE/UPDATE/REMOVE}
+        domain_uuid : str = event.record_id.domain_uuid.uuid
+
+        LOGGER.info('[_dispatch_slice] event.record_id={:s}'.format(grpc_message_to_json_string(event.record_id)))
+        record = clients.dlt_gateway_client.GetFromDlt(event.record_id)
+        LOGGER.info('[_dispatch_slice] record={:s}'.format(grpc_message_to_json_string(record)))
+
+        slice_ = Slice(**json.loads(record.data_json))
+
+        context_uuid = slice_.slice_id.context_id.context_uuid.uuid
+        owner_uuid = slice_.slice_owner.owner_uuid.uuid
+        create_context(clients.context_client, context_uuid)
+        create_topology(clients.context_client, context_uuid, DEFAULT_TOPOLOGY_UUID)
+
+        if domain_uuid in local_domain_uuids:
+            # the slice is destined to the local domain
+            if event_type in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}:
+                try:
+                    db_slice = clients.context_client.GetSlice(slice_.slice_id)
+                    # exists
+                    db_json_slice = grpc_message_to_json_string(db_slice)
+                except grpc.RpcError:
+                    # not exists
+                    db_json_slice = None
+
+                _json_slice = grpc_message_to_json_string(slice_)
+                if db_json_slice != _json_slice:
+                    # not exists or is different...
+                    slice_id = clients.interdomain_client.RequestSlice(slice_)
+                    topology_id = TopologyId(**json_topology_id(domain_uuid))
+                    dlt_slice_id = DltSliceId()
+                    dlt_slice_id.topology_id.CopyFrom(topology_id)  # pylint: disable=no-member
+                    dlt_slice_id.slice_id.CopyFrom(slice_id)        # pylint: disable=no-member
+                    clients.dlt_connector_client.RecordSlice(dlt_slice_id)
+
+            elif event_type in {EventTypeEnum.EVENTTYPE_DELETE}:
+                raise NotImplementedError('Delete Slice')
+        elif owner_uuid in local_domain_uuids:
+            # it is owned by me
+            # just update it locally
+            LOGGER.info('[_dispatch_slice] updating locally')
+
+            local_slice = Slice()
+            local_slice.CopyFrom(slice_)
+
+            # pylint: disable=no-member
+            del local_slice.slice_service_ids[:]    # they are from remote domains so will not be present locally
+            del local_slice.slice_subslice_ids[:]   # they are from remote domains so will not be present locally
+
+            clients.context_client.SetSlice(local_slice)
+        else:
+            MSG = '[_dispatch_slice] Ignoring DLT event received (remote): {:s}'
+            LOGGER.info(MSG.format(grpc_message_to_json_string(event)))
+
diff --git a/src/dlt/connector/service/event_dispatcher/__init__.py b/src/dlt/connector/service/event_dispatcher/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/dlt/connector/service/event_dispatcher/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/dlt/connector/service/tools/Checkers.py b/src/dlt/connector/service/tools/Checkers.py
new file mode 100644
index 0000000000000000000000000000000000000000..e25d8d5a5068ee927088697ad3453fba99a1f316
--- /dev/null
+++ b/src/dlt/connector/service/tools/Checkers.py
@@ -0,0 +1,24 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.dlt_gateway_pb2 import DLTRECORDOPERATION_UNDEFINED, DLTRECORDTYPE_UNDEFINED, DltRecord
+
+def record_exists(record : DltRecord) -> bool:
+    exists = True
+    exists = exists and (len(record.record_id.domain_uuid.uuid) > 0)
+    exists = exists and (record.record_id.type != DLTRECORDTYPE_UNDEFINED)
+    exists = exists and (len(record.record_id.record_uuid.uuid) > 0)
+    #exists = exists and (record.operation != DLTRECORDOPERATION_UNDEFINED)
+    exists = exists and (len(record.data_json) > 0)
+    return exists
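+
+# Illustrative note: a DltRecord with unset fields (e.g., one fetched for an id not yet
+# stored in the DLT) fails these checks, letting a caller decide between recording a
+# first-time ADD and an UPDATE.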
diff --git a/src/dlt/connector/service/tools/__init__.py b/src/dlt/connector/service/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/dlt/connector/service/tools/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/interdomain/Dockerfile b/src/interdomain/Dockerfile
index 388fcb76d08b49fdbc20baa3fb0d1ae957fdd46f..ee1071896d0ab0838a2126a2abb9a77278461573 100644
--- a/src/interdomain/Dockerfile
+++ b/src/interdomain/Dockerfile
@@ -63,10 +63,12 @@ RUN python3 -m pip install -r requirements.txt
 # Add component files into working directory
 WORKDIR /var/teraflow
 COPY src/context/. context/
-COPY src/device/. device/
+#COPY src/device/. device/
+COPY src/dlt/. dlt/
 COPY src/interdomain/. interdomain/
-COPY src/monitoring/. monitoring/
-COPY src/service/. service/
+#COPY src/monitoring/. monitoring/
+COPY src/pathcomp/. pathcomp/
+#COPY src/service/. service/
 COPY src/slice/. slice/
 
 # Start the service
diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py
index 01ba90ef5a6cb098e6d419fa0d6abb450893f8c6..a178095aeee81c3e6407cf1c6706b047fd1c65fc 100644
--- a/src/interdomain/service/InterdomainServiceServicerImpl.py
+++ b/src/interdomain/service/InterdomainServiceServicerImpl.py
@@ -12,15 +12,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging
-from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
-from common.proto.context_pb2 import (
-    AuthenticationResult, Slice, SliceId, SliceStatus, SliceStatusEnum, TeraFlowController)
+import grpc, logging, uuid
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatusEnum, TeraFlowController, TopologyId
 from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer
-#from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
+from common.tools.context_queries.Context import create_context
+from common.tools.context_queries.InterDomain import (
+    compute_interdomain_path, compute_traversed_domains, get_local_device_uuids, is_inter_domain, is_multi_domain)
+from common.tools.context_queries.Topology import create_topology
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
-from interdomain.service.RemoteDomainClients import RemoteDomainClients
+from dlt.connector.client.DltConnectorClient import DltConnectorClient
+from interdomain.service.topology_abstractor.DltRecordSender import DltRecordSender
+from pathcomp.frontend.client.PathCompClient import PathCompClient
 from slice.client.SliceClient import SliceClient
+from .RemoteDomainClients import RemoteDomainClients
+from .Tools import compose_slice, compute_slice_owner, map_abstract_endpoints_to_real
 
 LOGGER = logging.getLogger(__name__)
 
@@ -37,89 +46,92 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def RequestSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
         context_client = ContextClient()
+        pathcomp_client = PathCompClient()
         slice_client = SliceClient()
-
-        domains_to_endpoints = {}
-        local_domain_uuid = None
-        for slice_endpoint_id in request.slice_endpoint_ids:
-            device_uuid = slice_endpoint_id.device_id.device_uuid.uuid
-            domain_uuid = device_uuid.split('@')[1]
-            endpoints = domains_to_endpoints.setdefault(domain_uuid, [])
-            endpoints.append(slice_endpoint_id)
-            if local_domain_uuid is None: local_domain_uuid = domain_uuid
+        dlt_connector_client = DltConnectorClient()
+
+        local_device_uuids = get_local_device_uuids(context_client)
+        slice_owner_uuid = request.slice_owner.owner_uuid.uuid
+        not_inter_domain = not is_inter_domain(context_client, request.slice_endpoint_ids)
+        no_slice_owner = len(slice_owner_uuid) == 0
+        is_local_slice_owner = slice_owner_uuid in local_device_uuids
+        if not_inter_domain and (no_slice_owner or is_local_slice_owner):
+            str_slice = grpc_message_to_json_string(request)
+            raise Exception('InterDomain can only handle inter-domain slice requests: {:s}'.format(str_slice))
+
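+        # Compute the end-to-end path over the abstract inter-domain topology; the result
+        # is a list of (device_uuid, endpoint_ids) hops.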
+        interdomain_path = compute_interdomain_path(pathcomp_client, request)
+        str_interdomain_path = [
+            [device_uuid, [
+                (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid)
+                for endpoint_id in endpoint_ids
+            ]]
+            for device_uuid, endpoint_ids in interdomain_path
+        ]
+        LOGGER.info('interdomain_path={:s}'.format(str(str_interdomain_path)))
+
+        traversed_domains = compute_traversed_domains(context_client, interdomain_path)
+        str_traversed_domains = [
+            (domain_uuid, is_local_domain, [
+                (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid)
+                for endpoint_id in endpoint_ids
+            ])
+            for domain_uuid,is_local_domain,endpoint_ids in traversed_domains
+        ]
+        LOGGER.info('traversed_domains={:s}'.format(str(str_traversed_domains)))
+
+        slice_owner_uuid = compute_slice_owner(context_client, traversed_domains)
+        LOGGER.info('slice_owner_uuid={:s}'.format(str(slice_owner_uuid)))
+        if slice_owner_uuid is None:
+            raise Exception('Unable to identify slice owner')
 
         reply = Slice()
         reply.CopyFrom(request)
 
-        # decompose remote slices
-        for domain_uuid, slice_endpoint_ids in domains_to_endpoints.items():
-            if domain_uuid == local_domain_uuid: continue
-
-            remote_slice_request = Slice()
-            remote_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid
-            remote_slice_request.slice_id.slice_uuid.uuid = \
-                request.slice_id.slice_uuid.uuid + ':subslice@' + local_domain_uuid
-            remote_slice_request.slice_status.slice_status = request.slice_status.slice_status
-            for endpoint_id in slice_endpoint_ids:
-                slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add()
-                slice_endpoint_id.device_id.device_uuid.uuid = endpoint_id.device_id.device_uuid.uuid
-                slice_endpoint_id.endpoint_uuid.uuid = endpoint_id.endpoint_uuid.uuid
-
-            # add endpoint connecting to remote domain
-            if domain_uuid == 'D1':
-                slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add()
-                slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1'
-                slice_endpoint_id.endpoint_uuid.uuid = '2/1'
-            elif domain_uuid == 'D2':
-                slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add()
-                slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2'
-                slice_endpoint_id.endpoint_uuid.uuid = '2/1'
-
-            interdomain_client = self.remote_domain_clients.get_peer('remote-teraflow')
-            remote_slice_reply = interdomain_client.LookUpSlice(remote_slice_request)
-            if remote_slice_reply == remote_slice_request.slice_id: # pylint: disable=no-member
-                # successful case
-                remote_slice = interdomain_client.OrderSliceFromCatalog(remote_slice_request)
-                if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE:
-                    raise Exception('Remote Slice creation failed. Wrong Slice status returned')
+        dlt_record_sender = DltRecordSender(context_client, dlt_connector_client)
+
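+        # For each traversed domain: local domains get a real sub-slice provisioned through
+        # the Slice component, while remote domains get an abstract sub-slice stored in
+        # Context and queued for publication to the DLT so the peer domain can instantiate it.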
+        for domain_uuid, is_local_domain, endpoint_ids in traversed_domains:
+            if is_local_domain:
+                slice_uuid = str(uuid.uuid4())
+                LOGGER.info('[loop] [local] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format(
+                    str(domain_uuid), str(is_local_domain), str(slice_uuid)))
+
+                # local slices always in DEFAULT_CONTEXT_UUID
+                #context_uuid = request.slice_id.context_id.context_uuid.uuid
+                context_uuid = DEFAULT_CONTEXT_UUID
+                endpoint_ids = map_abstract_endpoints_to_real(context_client, domain_uuid, endpoint_ids)
+                sub_slice = compose_slice(
+                    context_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints,
+                    config_rules=request.slice_config.config_rules)
+                LOGGER.info('[loop] [local] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice)))
+                sub_slice_id = slice_client.CreateSlice(sub_slice)
             else:
-                # not in catalog
-                remote_slice = interdomain_client.CreateSliceAndAddToCatalog(remote_slice_request)
-                if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE:
-                    raise Exception('Remote Slice creation failed. Wrong Slice status returned')
-
-            #context_client.SetSlice(remote_slice)
-            #subslice_id = reply.slice_subslice_ids.add()
-            #subslice_id.CopyFrom(remote_slice.slice_id)
-
-        local_slice_request = Slice()
-        local_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid
-        local_slice_request.slice_id.slice_uuid.uuid = request.slice_id.slice_uuid.uuid + ':subslice'
-        local_slice_request.slice_status.slice_status = request.slice_status.slice_status
-        for endpoint_id in domains_to_endpoints[local_domain_uuid]:
-            slice_endpoint_id = local_slice_request.slice_endpoint_ids.add()
-            slice_endpoint_id.CopyFrom(endpoint_id)
-
-        # add endpoint connecting to remote domain
-        if local_domain_uuid == 'D1':
-            slice_endpoint_id = local_slice_request.slice_endpoint_ids.add()
-            slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1'
-            slice_endpoint_id.endpoint_uuid.uuid = '2/1'
-        elif local_domain_uuid == 'D2':
-            slice_endpoint_id = local_slice_request.slice_endpoint_ids.add()
-            slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2'
-            slice_endpoint_id.endpoint_uuid.uuid = '2/1'
-
-        local_slice_reply = slice_client.CreateSlice(local_slice_request)
-        if local_slice_reply != local_slice_request.slice_id: # pylint: disable=no-member
-            raise Exception('Local Slice creation failed. Wrong Slice Id was returned')
-
-        subslice_id = reply.slice_subslice_ids.add()
-        subslice_id.context_id.context_uuid.uuid = local_slice_request.slice_id.context_id.context_uuid.uuid
-        subslice_id.slice_uuid.uuid = local_slice_request.slice_id.slice_uuid.uuid
-
-        context_client.SetSlice(reply)
-        return reply.slice_id
+                slice_uuid = request.slice_id.slice_uuid.uuid
+                LOGGER.info('[loop] [remote] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format(
+                    str(domain_uuid), str(is_local_domain), str(slice_uuid)))
+
+                # create context/topology for the remote domains where we are creating slices
+                create_context(context_client, domain_uuid)
+                create_topology(context_client, domain_uuid, DEFAULT_TOPOLOGY_UUID)
+                sub_slice = compose_slice(
+                    domain_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints,
+                    config_rules=request.slice_config.config_rules, owner_uuid=slice_owner_uuid)
+                LOGGER.info('[loop] [remote] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice)))
+                sub_slice_id = context_client.SetSlice(sub_slice)
+                topology_id = TopologyId(**json_topology_id(domain_uuid))
+                dlt_record_sender.add_slice(topology_id, sub_slice)
+
+            LOGGER.info('[loop] adding sub-slice')
+            reply.slice_subslice_ids.add().CopyFrom(sub_slice_id)   # pylint: disable=no-member
+
+        LOGGER.info('Recording Remote Slice requests to DLT')
+        dlt_record_sender.commit()
+
+        LOGGER.info('Activating interdomain slice')
+        reply.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
+
+        LOGGER.info('Updating interdomain slice')
+        slice_id = context_client.SetSlice(reply)
+        return slice_id
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def Authenticate(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult:
diff --git a/src/interdomain/service/Tools.py b/src/interdomain/service/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb6371603ea90437437541bb995a59813764d9ef
--- /dev/null
+++ b/src/interdomain/service/Tools.py
@@ -0,0 +1,131 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging
+from typing import List, Optional, Tuple
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.proto.context_pb2 import (
+    ConfigRule, Constraint, ContextId, Device, Empty, EndPointId, Slice, SliceStatusEnum)
+from common.tools.context_queries.CheckType import device_type_is_network, endpoint_type_is_border
+from common.tools.context_queries.InterDomain import get_local_device_uuids
+from common.tools.grpc.ConfigRules import copy_config_rules
+from common.tools.grpc.Constraints import copy_constraints
+from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+
+LOGGER = logging.getLogger(__name__)
+
+def compute_slice_owner(
+    context_client : ContextClient, traversed_domains : List[Tuple[str, bool, List[EndPointId]]]
+) -> Optional[str]:
+    traversed_domain_uuids = {traversed_domain[0] for traversed_domain in traversed_domains}
+
+    existing_topology_ids = context_client.ListTopologyIds(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)))
+    existing_topology_uuids = {
+        topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids
+    }
+    existing_topology_uuids.discard(DEFAULT_TOPOLOGY_UUID)
+    existing_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID)
+
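+    # The owner is the single traversed domain that is also known locally as a
+    # per-domain topology (the default and inter-domain topologies are excluded above).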
+    candidate_owner_uuids = traversed_domain_uuids.intersection(existing_topology_uuids)
+    if len(candidate_owner_uuids) != 1:
+        data = {
+            'traversed_domain_uuids' : [td_uuid for td_uuid in traversed_domain_uuids ],
+            'existing_topology_uuids': [et_uuid for et_uuid in existing_topology_uuids],
+            'candidate_owner_uuids'  : [co_uuid for co_uuid in candidate_owner_uuids  ],
+        }
+        LOGGER.warning('Unable to identify slice owner: {:s}'.format(json.dumps(data)))
+        return None
+
+    return candidate_owner_uuids.pop()
+
+def compose_slice(
+    context_uuid : str, slice_uuid : str, endpoint_ids : List[EndPointId], constraints : List[Constraint] = [],
+    config_rules : List[ConfigRule] = [], owner_uuid : Optional[str] = None
+) -> Slice:
+    slice_ = Slice()
+    slice_.slice_id.context_id.context_uuid.uuid = context_uuid             # pylint: disable=no-member
+    slice_.slice_id.slice_uuid.uuid = slice_uuid                            # pylint: disable=no-member
+    slice_.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED  # pylint: disable=no-member
+
+    if owner_uuid is not None:
+        slice_.slice_owner.owner_uuid.uuid = owner_uuid                     # pylint: disable=no-member
+
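+    # Only the terminal endpoints of the domain-local segment are kept as slice endpoints.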
+    if len(endpoint_ids) >= 2:
+        slice_.slice_endpoint_ids.add().CopyFrom(endpoint_ids[0])           # pylint: disable=no-member
+        slice_.slice_endpoint_ids.add().CopyFrom(endpoint_ids[-1])          # pylint: disable=no-member
+
+    if len(constraints) > 0:
+        copy_constraints(constraints, slice_.slice_constraints)             # pylint: disable=no-member
+
+    if len(config_rules) > 0:
+        copy_config_rules(config_rules, slice_.slice_config.config_rules)   # pylint: disable=no-member
+
+    return slice_
+
+def map_abstract_endpoints_to_real(
+    context_client : ContextClient, local_domain_uuid : str, abstract_endpoint_ids : List[EndPointId]
+) -> List[EndPointId]:
+
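+    # Index the endpoints of all local, non-abstract devices by (device_uuid, endpoint_uuid);
+    # border endpoints are additionally indexed under the local domain UUID so that
+    # endpoint ids expressed against the abstract topology can also be resolved.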
+    local_device_uuids = get_local_device_uuids(context_client)
+    all_devices = context_client.ListDevices(Empty())
+
+    map_endpoints_to_devices = dict()
+    for device in all_devices.devices:
+        LOGGER.info('[map_abstract_endpoints_to_real] Checking device {:s}'.format(
+            grpc_message_to_json_string(device)))
+
+        if device_type_is_network(device.device_type):
+            LOGGER.info('[map_abstract_endpoints_to_real]   Ignoring network device')
+            continue
+        device_uuid = device.device_id.device_uuid.uuid
+        if device_uuid not in local_device_uuids:
+            LOGGER.info('[map_abstract_endpoints_to_real]   Ignoring non-local device')
+            continue
+
+        for endpoint in device.device_endpoints:
+            LOGGER.info('[map_abstract_endpoints_to_real]   Checking endpoint {:s}'.format(
+                grpc_message_to_json_string(endpoint)))
+            endpoint_id = endpoint.endpoint_id
+            device_uuid = endpoint_id.device_id.device_uuid.uuid
+            endpoint_uuid = endpoint_id.endpoint_uuid.uuid
+            map_endpoints_to_devices[(device_uuid, endpoint_uuid)] = endpoint_id
+            if endpoint_type_is_border(endpoint.endpoint_type):
+                map_endpoints_to_devices[(local_domain_uuid, endpoint_uuid)] = endpoint_id
+
+    LOGGER.info('[map_abstract_endpoints_to_real] map_endpoints_to_devices={:s}'.format(
+        str({
+            endpoint_tuple:grpc_message_to_json(endpoint_id)
+            for endpoint_tuple,endpoint_id in map_endpoints_to_devices.items()
+        })))
+
+    # map abstract device/endpoints to real device/endpoints
+    real_endpoint_ids = []
+    for endpoint_id in abstract_endpoint_ids:
+        LOGGER.info('[map_abstract_endpoints_to_real] Mapping endpoint_id {:s} ...'.format(
+                grpc_message_to_json_string(endpoint_id)))
+        device_uuid = endpoint_id.device_id.device_uuid.uuid
+        endpoint_uuid = endpoint_id.endpoint_uuid.uuid
+        _endpoint_id = map_endpoints_to_devices.get((device_uuid, endpoint_uuid))
+        if _endpoint_id is None:
+            LOGGER.warning('map_endpoints_to_devices={:s}'.format(str(map_endpoints_to_devices)))
+            MSG = 'Unable to map abstract EndPoint({:s}) to real one.'
+            raise Exception(MSG.format(grpc_message_to_json_string(endpoint_id)))
+
+        LOGGER.info('[map_abstract_endpoints_to_real] ... to endpoint_id {:s}'.format(
+                grpc_message_to_json_string(_endpoint_id)))
+        real_endpoint_ids.append(_endpoint_id)
+
+    return real_endpoint_ids
diff --git a/src/interdomain/service/__main__.py b/src/interdomain/service/__main__.py
index c0a078f4ded85ab957011d21d56c97c8d303dc2a..bcbda8dfda05ec7b245b5939d8a3afc4b979562f 100644
--- a/src/interdomain/service/__main__.py
+++ b/src/interdomain/service/__main__.py
@@ -17,7 +17,8 @@ from prometheus_client import start_http_server
 from common.Constants import ServiceNameEnum
 from common.Settings import (
     ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
-    get_service_port_grpc, wait_for_environment_variables)
+    wait_for_environment_variables)
+from .topology_abstractor.TopologyAbstractor import TopologyAbstractor
 from .InterdomainService import InterdomainService
 from .RemoteDomainClients import RemoteDomainClients
 
@@ -32,14 +33,18 @@ def main():
     global LOGGER # pylint: disable=global-statement
 
     log_level = get_log_level()
-    logging.basicConfig(level=log_level)
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
     LOGGER = logging.getLogger(__name__)
 
     wait_for_environment_variables([
-        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
-        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
-        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_HOST     ),
-        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.CONTEXT,  ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.CONTEXT,  ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.SLICE,    ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.SLICE,    ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.DLT,      ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.DLT,      ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
     signal.signal(signal.SIGINT,  signal_handler)
@@ -58,14 +63,19 @@ def main():
     grpc_service = InterdomainService(remote_domain_clients)
     grpc_service.start()
 
+    # Subscribe to Context Events
+    topology_abstractor = TopologyAbstractor()
+    topology_abstractor.start()
+
     # TODO: improve with configuration the definition of the remote peers
-    interdomain_service_port_grpc = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN)
-    remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', interdomain_service_port_grpc)
+    #interdomain_service_port_grpc = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN)
+    #remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', interdomain_service_port_grpc)
 
     # Wait for Ctrl+C or termination signal
     while not terminate.wait(timeout=0.1): pass
 
     LOGGER.info('Terminating...')
+    topology_abstractor.stop()
     grpc_service.stop()
 
     LOGGER.info('Bye')
diff --git a/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..01ba90ef5a6cb098e6d419fa0d6abb450893f8c6
--- /dev/null
+++ b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py
@@ -0,0 +1,153 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
+from common.proto.context_pb2 import (
+    AuthenticationResult, Slice, SliceId, SliceStatus, SliceStatusEnum, TeraFlowController)
+from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer
+#from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from interdomain.service.RemoteDomainClients import RemoteDomainClients
+from slice.client.SliceClient import SliceClient
+
+LOGGER = logging.getLogger(__name__)
+
+SERVICE_NAME = 'Interdomain'
+METHOD_NAMES = ['RequestSlice', 'Authenticate', 'LookUpSlice', 'OrderSliceFromCatalog', 'CreateSliceAndAddToCatalog']
+METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
+
+class InterdomainServiceServicerImpl(InterdomainServiceServicer):
+    def __init__(self, remote_domain_clients : RemoteDomainClients):
+        LOGGER.debug('Creating Servicer...')
+        self.remote_domain_clients = remote_domain_clients
+        LOGGER.debug('Servicer Created')
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RequestSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
+        context_client = ContextClient()
+        slice_client = SliceClient()
+
+        domains_to_endpoints = {}
+        local_domain_uuid = None
+        for slice_endpoint_id in request.slice_endpoint_ids:
+            device_uuid = slice_endpoint_id.device_id.device_uuid.uuid
+            domain_uuid = device_uuid.split('@')[1]
+            endpoints = domains_to_endpoints.setdefault(domain_uuid, [])
+            endpoints.append(slice_endpoint_id)
+            if local_domain_uuid is None: local_domain_uuid = domain_uuid
+
+        reply = Slice()
+        reply.CopyFrom(request)
+
+        # decompose remote slices
+        for domain_uuid, slice_endpoint_ids in domains_to_endpoints.items():
+            if domain_uuid == local_domain_uuid: continue
+
+            remote_slice_request = Slice()
+            remote_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid
+            remote_slice_request.slice_id.slice_uuid.uuid = \
+                request.slice_id.slice_uuid.uuid + ':subslice@' + local_domain_uuid
+            remote_slice_request.slice_status.slice_status = request.slice_status.slice_status
+            for endpoint_id in slice_endpoint_ids:
+                slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add()
+                slice_endpoint_id.device_id.device_uuid.uuid = endpoint_id.device_id.device_uuid.uuid
+                slice_endpoint_id.endpoint_uuid.uuid = endpoint_id.endpoint_uuid.uuid
+
+            # add endpoint connecting to remote domain
+            if domain_uuid == 'D1':
+                slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add()
+                slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1'
+                slice_endpoint_id.endpoint_uuid.uuid = '2/1'
+            elif domain_uuid == 'D2':
+                slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add()
+                slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2'
+                slice_endpoint_id.endpoint_uuid.uuid = '2/1'
+
+            interdomain_client = self.remote_domain_clients.get_peer('remote-teraflow')
+            remote_slice_reply = interdomain_client.LookUpSlice(remote_slice_request)
+            if remote_slice_reply == remote_slice_request.slice_id: # pylint: disable=no-member
+                # successful case
+                remote_slice = interdomain_client.OrderSliceFromCatalog(remote_slice_request)
+                if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE:
+                    raise Exception('Remote Slice creation failed. Wrong Slice status returned')
+            else:
+                # not in catalog
+                remote_slice = interdomain_client.CreateSliceAndAddToCatalog(remote_slice_request)
+                if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE:
+                    raise Exception('Remote Slice creation failed. Wrong Slice status returned')
+
+            #context_client.SetSlice(remote_slice)
+            #subslice_id = reply.slice_subslice_ids.add()
+            #subslice_id.CopyFrom(remote_slice.slice_id)
+
+        local_slice_request = Slice()
+        local_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid
+        local_slice_request.slice_id.slice_uuid.uuid = request.slice_id.slice_uuid.uuid + ':subslice'
+        local_slice_request.slice_status.slice_status = request.slice_status.slice_status
+        for endpoint_id in domains_to_endpoints[local_domain_uuid]:
+            slice_endpoint_id = local_slice_request.slice_endpoint_ids.add()
+            slice_endpoint_id.CopyFrom(endpoint_id)
+
+        # add endpoint connecting to remote domain
+        if local_domain_uuid == 'D1':
+            slice_endpoint_id = local_slice_request.slice_endpoint_ids.add()
+            slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1'
+            slice_endpoint_id.endpoint_uuid.uuid = '2/1'
+        elif local_domain_uuid == 'D2':
+            slice_endpoint_id = local_slice_request.slice_endpoint_ids.add()
+            slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2'
+            slice_endpoint_id.endpoint_uuid.uuid = '2/1'
+
+        local_slice_reply = slice_client.CreateSlice(local_slice_request)
+        if local_slice_reply != local_slice_request.slice_id: # pylint: disable=no-member
+            raise Exception('Local Slice creation failed. Wrong Slice Id was returned')
+
+        subslice_id = reply.slice_subslice_ids.add()
+        subslice_id.context_id.context_uuid.uuid = local_slice_request.slice_id.context_id.context_uuid.uuid
+        subslice_id.slice_uuid.uuid = local_slice_request.slice_id.slice_uuid.uuid
+
+        context_client.SetSlice(reply)
+        return reply.slice_id
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def Authenticate(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult:
+        auth_result = AuthenticationResult()
+        auth_result.context_id.CopyFrom(request.context_id) # pylint: disable=no-member
+        auth_result.authenticated = True
+        return auth_result
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def LookUpSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
+        try:
+            context_client = ContextClient()
+            slice_ = context_client.GetSlice(request.slice_id)
+            return slice_.slice_id
+        except grpc.RpcError:
+            #LOGGER.exception('Unable to get slice({:s})'.format(grpc_message_to_json_string(request.slice_id)))
+            return SliceId()
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def OrderSliceFromCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice:
+        raise NotImplementedError('OrderSliceFromCatalog')
+        #return Slice()
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def CreateSliceAndAddToCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice:
+        context_client = ContextClient()
+        slice_client = SliceClient()
+        reply = slice_client.CreateSlice(request)
+        if reply != request.slice_id: # pylint: disable=no-member
+            raise Exception('Slice creation failed. Wrong Slice Id was returned')
+        return context_client.GetSlice(request.slice_id)
diff --git a/src/interdomain/service/topology_abstractor/AbstractDevice.py b/src/interdomain/service/topology_abstractor/AbstractDevice.py
new file mode 100644
index 0000000000000000000000000000000000000000..3448c1036d4ef086d679d5f4308ae95decfbffa7
--- /dev/null
+++ b/src/interdomain/service/topology_abstractor/AbstractDevice.py
@@ -0,0 +1,190 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging
+from typing import Dict, Optional
+from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import (
+    ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, EndPoint)
+from common.tools.context_queries.CheckType import (
+    device_type_is_datacenter, device_type_is_network, endpoint_type_is_border)
+from common.tools.context_queries.Device import add_device_to_topology, get_existing_device_uuids
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device, json_device_id
+from context.client.ContextClient import ContextClient
+
+LOGGER = logging.getLogger(__name__)
+
+class AbstractDevice:
+    def __init__(self, device_uuid : str, device_type : DeviceTypeEnum):
+        self.__context_client = ContextClient()
+        self.__device_uuid : str = device_uuid
+        self.__device_type : DeviceTypeEnum = device_type
+        self.__device : Optional[Device] = None
+        self.__device_id : Optional[DeviceId] = None
+
+        # Dict[device_uuid, Dict[endpoint_uuid, abstract EndPoint]]
+        self.__device_endpoint_to_abstract : Dict[str, Dict[str, EndPoint]] = dict()
+
+        # Dict[endpoint_uuid, device_uuid]
+        self.__abstract_endpoint_to_device : Dict[str, str] = dict()
+
+    @property
+    def uuid(self) -> str: return self.__device_uuid
+
+    @property
+    def device_id(self) -> Optional[DeviceId]: return self.__device_id
+
+    @property
+    def device(self) -> Optional[Device]: return self.__device
+
+    def get_endpoint(self, device_uuid : str, endpoint_uuid : str) -> Optional[EndPoint]:
+        return self.__device_endpoint_to_abstract.get(device_uuid, {}).get(endpoint_uuid)
+
+    def initialize(self) -> bool:
+        if self.__device is not None: return False
+
+        existing_device_uuids = get_existing_device_uuids(self.__context_client)
+        create_abstract_device = self.__device_uuid not in existing_device_uuids
+
+        if create_abstract_device:
+            self._create_empty()
+        else:
+            self._load_existing()
+
+        is_datacenter = device_type_is_datacenter(self.__device_type)
+        is_network = device_type_is_network(self.__device_type)
+        if is_datacenter or is_network:
+            # Add abstract device to topologies [INTERDOMAIN_TOPOLOGY_UUID]
+            context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+            topology_uuids = [INTERDOMAIN_TOPOLOGY_UUID]
+            for topology_uuid in topology_uuids:
+                add_device_to_topology(self.__context_client, context_id, topology_uuid, self.__device_uuid)
+
+        # seems not needed; to be removed in future releases
+        #if is_datacenter and create_abstract_device:
+        #    dc_device = self.__context_client.GetDevice(DeviceId(**json_device_id(self.__device_uuid)))
+        #    if device_type_is_datacenter(dc_device.device_type):
+        #        self.update_endpoints(dc_device)
+        #elif is_network:
+        #    devices_in_admin_topology = get_devices_in_topology(
+        #        self.__context_client, context_id, DEFAULT_TOPOLOGY_UUID)
+        #    for device in devices_in_admin_topology:
+        #        if device_type_is_datacenter(device.device_type): continue
+        #        self.update_endpoints(device)
+
+        return True
+
+    def _create_empty(self) -> None:
+        device_uuid = self.__device_uuid
+
+        device = Device(**json_device(
+            device_uuid, self.__device_type.value, DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED,
+            endpoints=[], config_rules=[], drivers=[DeviceDriverEnum.DEVICEDRIVER_UNDEFINED]
+        ))
+        self.__context_client.SetDevice(device)
+        self.__device = device
+        self.__device_id = self.__device.device_id
+
+    def _load_existing(self) -> None:
+        self.__device_endpoint_to_abstract = dict()
+        self.__abstract_endpoint_to_device = dict()
+
+        self.__device_id = DeviceId(**json_device_id(self.__device_uuid))
+        self.__device = self.__context_client.GetDevice(self.__device_id)
+        self.__device_type = self.__device.device_type
+        device_uuid = self.__device_id.device_uuid.uuid
+
+        device_type = self.__device_type
+        is_datacenter = device_type_is_datacenter(device_type)
+        is_network = device_type_is_network(device_type)
+        if not is_datacenter and not is_network:
+            LOGGER.warning('Unsupported InterDomain Device Type: {:s}'.format(str(device_type)))
+            return
+
+        # for each endpoint in abstract device, populate internal data structures and mappings
+        for interdomain_endpoint in self.__device.device_endpoints:
+            endpoint_uuid : str = interdomain_endpoint.endpoint_id.endpoint_uuid.uuid
+
+            if is_network:
+                endpoint_uuid,device_uuid = endpoint_uuid.split('@', maxsplit=1)
+
+            self.__device_endpoint_to_abstract\
+                .setdefault(device_uuid, {}).setdefault(endpoint_uuid, interdomain_endpoint)
+            self.__abstract_endpoint_to_device\
+                .setdefault(endpoint_uuid, device_uuid)
+
+    def _update_endpoint_type(self, device_uuid : str, endpoint_uuid : str, endpoint_type : str) -> bool:
+        device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {})
+        interdomain_endpoint = device_endpoint_to_abstract.get(endpoint_uuid)
+        interdomain_endpoint_type = interdomain_endpoint.endpoint_type
+        if endpoint_type == interdomain_endpoint_type: return False
+        interdomain_endpoint.endpoint_type = endpoint_type
+        return True
+
+    def _add_endpoint(self, device_uuid : str, endpoint_uuid : str, endpoint_type : str) -> EndPoint:
+        interdomain_endpoint = self.__device.device_endpoints.add()
+        interdomain_endpoint.endpoint_id.device_id.CopyFrom(self.__device_id)
+        interdomain_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid
+        interdomain_endpoint.endpoint_type = endpoint_type
+
+        self.__device_endpoint_to_abstract\
+            .setdefault(device_uuid, {}).setdefault(endpoint_uuid, interdomain_endpoint)
+        self.__abstract_endpoint_to_device\
+            .setdefault(endpoint_uuid, device_uuid)
+
+        return interdomain_endpoint
+
+    def _remove_endpoint(
+        self, device_uuid : str, endpoint_uuid : str, interdomain_endpoint : EndPoint
+    ) -> None:
+        self.__abstract_endpoint_to_device.pop(endpoint_uuid, None)
+        device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {})
+        device_endpoint_to_abstract.pop(endpoint_uuid, None)
+        self.__device.device_endpoints.remove(interdomain_endpoint)
+
+    def update_endpoints(self, device : Device) -> bool:
+        if device_type_is_datacenter(self.__device.device_type): return False
+
+        device_uuid = device.device_id.device_uuid.uuid
+        device_border_endpoint_uuids = {
+            endpoint.endpoint_id.endpoint_uuid.uuid : endpoint.endpoint_type
+            for endpoint in device.device_endpoints
+            if endpoint_type_is_border(endpoint.endpoint_type)
+        }
+
+        updated = False
+
+        # for each border endpoint in abstract device that is not in device; remove from abstract device
+        device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {})
+        _device_endpoint_to_abstract = copy.deepcopy(device_endpoint_to_abstract)
+        for endpoint_uuid, interdomain_endpoint in _device_endpoint_to_abstract.items():
+            if endpoint_uuid in device_border_endpoint_uuids: continue
+            # remove interdomain endpoint that is not in device
+            self._remove_endpoint(device_uuid, endpoint_uuid, interdomain_endpoint)
+            updated = True
+
+        # for each border endpoint in device that is not in abstract device; add to abstract device
+        for endpoint_uuid,endpoint_type in device_border_endpoint_uuids.items():
+            # if already added; just check endpoint type is not modified
+            if endpoint_uuid in self.__abstract_endpoint_to_device:
+                # evaluate the update first so it is not short-circuited when updated is already True
+                updated = self._update_endpoint_type(device_uuid, endpoint_uuid, endpoint_type) or updated
+                continue
+
+            # otherwise, add it to the abstract device
+            self._add_endpoint(device_uuid, endpoint_uuid, endpoint_type)
+            updated = True
+
+        return updated
diff --git a/src/interdomain/service/topology_abstractor/AbstractLink.py b/src/interdomain/service/topology_abstractor/AbstractLink.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fe7b07b0708ebf8490cf4304646037973b05d56
--- /dev/null
+++ b/src/interdomain/service/topology_abstractor/AbstractLink.py
@@ -0,0 +1,126 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging
+from typing import Dict, List, Optional, Tuple
+from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.proto.context_pb2 import ContextId, EndPointId, Link, LinkId
+from common.tools.context_queries.Link import add_link_to_topology, get_existing_link_uuids
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Link import json_link, json_link_id
+from context.client.ContextClient import ContextClient
+
+LOGGER = logging.getLogger(__name__)
+
+class AbstractLink:
+    def __init__(self, link_uuid : str):
+        self.__context_client = ContextClient()
+        self.__link_uuid : str = link_uuid
+        self.__link : Optional[Link] = None
+        self.__link_id : Optional[LinkId] = None
+
+        # Dict[(device_uuid, endpoint_uuid), abstract EndPointId]
+        self.__device_endpoint_to_abstract : Dict[Tuple[str, str], EndPointId] = dict()
+
+    @property
+    def uuid(self) -> str: return self.__link_uuid
+
+    @property
+    def link_id(self) -> Optional[LinkId]: return self.__link_id
+
+    @property
+    def link(self) -> Optional[Link]: return self.__link
+
+    @staticmethod
+    def compose_uuid(
+        device_uuid_a : str, endpoint_uuid_a : str, device_uuid_z : str, endpoint_uuid_z : str
+    ) -> str:
+        # sort endpoints lexicographically to prevent duplicates
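+        # e.g., both (R1, 1/1)-(R2, 2/2) and (R2, 2/2)-(R1, 1/1) compose to 'R1/1/1==R2/2/2'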
+        link_endpoint_uuids = sorted([
+            (device_uuid_a, endpoint_uuid_a),
+            (device_uuid_z, endpoint_uuid_z)
+        ])
+        link_uuid = '{:s}/{:s}=={:s}/{:s}'.format(
+            link_endpoint_uuids[0][0], link_endpoint_uuids[0][1],
+            link_endpoint_uuids[1][0], link_endpoint_uuids[1][1])
+        return link_uuid
+
+    def initialize(self) -> bool:
+        if self.__link is not None: return False
+
+        existing_link_uuids = get_existing_link_uuids(self.__context_client)
+
+        create = self.__link_uuid not in existing_link_uuids
+        if create:
+            self._create_empty()
+        else:
+            self._load_existing()
+
+        # Add abstract link to topologies [INTERDOMAIN_TOPOLOGY_UUID]
+        context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+        topology_uuids = [INTERDOMAIN_TOPOLOGY_UUID]
+        for topology_uuid in topology_uuids:
+            add_link_to_topology(self.__context_client, context_id, topology_uuid, self.__link_uuid)
+
+        return create
+
+    def _create_empty(self) -> None:
+        link = Link(**json_link(self.__link_uuid, endpoint_ids=[]))
+        self.__context_client.SetLink(link)
+        self.__link = link
+        self.__link_id = self.__link.link_id
+
+    def _load_existing(self) -> None:
+        self.__link_id = LinkId(**json_link_id(self.__link_uuid))
+        self.__link = self.__context_client.GetLink(self.__link_id)
+
+        self.__device_endpoint_to_abstract = dict()
+
+        # for each endpoint in abstract link, populate internal data structures and mappings
+        for endpoint_id in self.__link.link_endpoint_ids:
+            device_uuid : str = endpoint_id.device_id.device_uuid.uuid
+            endpoint_uuid : str = endpoint_id.endpoint_uuid.uuid
+            self.__device_endpoint_to_abstract.setdefault((device_uuid, endpoint_uuid), endpoint_id)
+
+    def _add_endpoint(self, device_uuid : str, endpoint_uuid : str) -> None:
+        endpoint_id = self.__link.link_endpoint_ids.add()
+        endpoint_id.device_id.device_uuid.uuid = device_uuid
+        endpoint_id.endpoint_uuid.uuid = endpoint_uuid
+        self.__device_endpoint_to_abstract.setdefault((device_uuid, endpoint_uuid), endpoint_id)
+
+    def _remove_endpoint(self, device_uuid : str, endpoint_uuid : str) -> None:
+        # the mapping is keyed by (device_uuid, endpoint_uuid) tuples, so pop the entry directly
+        endpoint_id = self.__device_endpoint_to_abstract.pop((device_uuid, endpoint_uuid), None)
+        if endpoint_id is not None: self.__link.link_endpoint_ids.remove(endpoint_id)
+
+    def update_endpoints(self, link_endpoint_uuids : List[Tuple[str, str]] = []) -> bool:
+        updated = False
+
+        # for each endpoint in abstract link that is not in link; remove from abstract link
+        device_endpoint_to_abstract = copy.deepcopy(self.__device_endpoint_to_abstract)
+        for device_uuid, endpoint_uuid in device_endpoint_to_abstract.keys():
+            if (device_uuid, endpoint_uuid) in link_endpoint_uuids: continue
+            # remove endpoint_id that is not in link
+            self._remove_endpoint(device_uuid, endpoint_uuid)
+            updated = True
+
+        # for each endpoint in link that is not in abstract link; add to abstract link
+        for device_uuid, endpoint_uuid in link_endpoint_uuids:
+            # if already added; just check endpoint type is not modified
+            if (device_uuid, endpoint_uuid) in self.__device_endpoint_to_abstract: continue
+            # otherwise, add it to the abstract device
+            self._add_endpoint(device_uuid, endpoint_uuid)
+            updated = True
+
+        return updated
diff --git a/src/interdomain/service/topology_abstractor/DltRecordSender.py b/src/interdomain/service/topology_abstractor/DltRecordSender.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7e3d81dded18c7406b54389cbe128c0fd27d7b4
--- /dev/null
+++ b/src/interdomain/service/topology_abstractor/DltRecordSender.py
@@ -0,0 +1,91 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Dict, List, Tuple
+from common.proto.context_pb2 import Device, Link, Service, Slice, TopologyId
+from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId
+from context.client.ContextClient import ContextClient
+from dlt.connector.client.DltConnectorClient import DltConnectorClient
+from .Types import DltRecordTypes
+
+LOGGER = logging.getLogger(__name__)
+
+class DltRecordSender:
+    def __init__(self, context_client : ContextClient, dlt_connector_client : DltConnectorClient) -> None:
+        self.context_client = context_client
+        self.dlt_connector_client = dlt_connector_client
+        self.dlt_record_uuids : List[str] = list()
+        self.dlt_record_uuid_to_data : Dict[str, Tuple[TopologyId, DltRecordTypes]] = dict()
+
+    def _add_record(self, record_uuid : str, data : Tuple[TopologyId, DltRecordTypes]) -> None:
+        if record_uuid in self.dlt_record_uuid_to_data: return
+        self.dlt_record_uuid_to_data[record_uuid] = data
+        self.dlt_record_uuids.append(record_uuid)
+
+    def add_device(self, topology_id : TopologyId, device : Device) -> None:
+        topology_uuid = topology_id.topology_uuid.uuid
+        device_uuid = device.device_id.device_uuid.uuid
+        record_uuid = '{:s}:device:{:s}'.format(topology_uuid, device_uuid)
+        self._add_record(record_uuid, (topology_id, device))
+
+    def add_link(self, topology_id : TopologyId, link : Link) -> None:
+        topology_uuid = topology_id.topology_uuid.uuid
+        link_uuid = link.link_id.link_uuid.uuid
+        record_uuid = '{:s}:link:{:s}'.format(topology_uuid, link_uuid)
+        self._add_record(record_uuid, (topology_id, link))
+
+    def add_service(self, topology_id : TopologyId, service : Service) -> None:
+        topology_uuid = topology_id.topology_uuid.uuid
+        context_uuid = service.service_id.context_id.context_uuid.uuid
+        service_uuid = service.service_id.service_uuid.uuid
+        record_uuid = '{:s}:service:{:s}/{:s}'.format(topology_uuid, context_uuid, service_uuid)
+        self._add_record(record_uuid, (topology_id, service))
+
+    def add_slice(self, topology_id : TopologyId, slice_ : Slice) -> None:
+        topology_uuid = topology_id.topology_uuid.uuid
+        context_uuid = slice_.slice_id.context_id.context_uuid.uuid
+        slice_uuid = slice_.slice_id.slice_uuid.uuid
+        record_uuid = '{:s}:slice:{:s}/{:s}'.format(topology_uuid, context_uuid, slice_uuid)
+        self._add_record(record_uuid, (topology_id, slice_))
+
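+    # Persist each queued record through Context and notify the DLT connector,
+    # preserving the order in which the records were added.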
+    def commit(self) -> None:
+        for dlt_record_uuid in self.dlt_record_uuids:
+            topology_id,dlt_record = self.dlt_record_uuid_to_data[dlt_record_uuid]
+            if isinstance(dlt_record, Device):
+                device_id = self.context_client.SetDevice(dlt_record)
+                dlt_device_id = DltDeviceId()
+                dlt_device_id.topology_id.CopyFrom(topology_id)     # pylint: disable=no-member
+                dlt_device_id.device_id.CopyFrom(device_id)         # pylint: disable=no-member
+                self.dlt_connector_client.RecordDevice(dlt_device_id)
+            elif isinstance(dlt_record, Link):
+                link_id = self.context_client.SetLink(dlt_record)
+                dlt_link_id = DltLinkId()
+                dlt_link_id.topology_id.CopyFrom(topology_id)       # pylint: disable=no-member
+                dlt_link_id.link_id.CopyFrom(link_id)               # pylint: disable=no-member
+                self.dlt_connector_client.RecordLink(dlt_link_id)
+            elif isinstance(dlt_record, Service):
+                service_id = self.context_client.SetService(dlt_record)
+                dlt_service_id = DltServiceId()
+                dlt_service_id.topology_id.CopyFrom(topology_id)    # pylint: disable=no-member
+                dlt_service_id.service_id.CopyFrom(service_id)      # pylint: disable=no-member
+                self.dlt_connector_client.RecordService(dlt_service_id)
+            elif isinstance(dlt_record, Slice):
+                slice_id = self.context_client.SetSlice(dlt_record)
+                dlt_slice_id = DltSliceId()
+                dlt_slice_id.topology_id.CopyFrom(topology_id)      # pylint: disable=no-member
+                dlt_slice_id.slice_id.CopyFrom(slice_id)            # pylint: disable=no-member
+                self.dlt_connector_client.RecordSlice(dlt_slice_id)
+            else:
+                LOGGER.error('Unsupported Record({:s})'.format(str(dlt_record)))
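+
+# Usage sketch (illustrative; the staging/commit flow is how this class is driven,
+# but the surrounding setup is an assumption):
+#   sender = DltRecordSender(context_client, dlt_connector_client)
+#   sender.add_device(topology_id, device)   # stage records, preserving insertion order
+#   sender.add_link(topology_id, link)
+#   sender.commit()                          # persist in Context, then notify the DLT connector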
diff --git a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..5729fe733c3a9a8f73f188b40338160ab286998b
--- /dev/null
+++ b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py
@@ -0,0 +1,288 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, threading
+from typing import Dict, List, Optional, Tuple
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import (
+    ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EndPoint, EndPointId, Link, LinkEvent, TopologyId,
+    TopologyEvent)
+from common.tools.context_queries.CheckType import (
+    device_type_is_datacenter, device_type_is_network, endpoint_type_is_border)
+from common.tools.context_queries.Context import create_context
+from common.tools.context_queries.Device import get_devices_in_topology, get_uuids_of_devices_in_topology
+from common.tools.context_queries.Link import get_links_in_topology
+from common.tools.context_queries.Topology import create_missing_topologies
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from dlt.connector.client.DltConnectorClient import DltConnectorClient
+from .AbstractDevice import AbstractDevice
+from .AbstractLink import AbstractLink
+from .DltRecordSender import DltRecordSender
+from .Types import EventTypes
+
+LOGGER = logging.getLogger(__name__)
+
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+INTERDOMAIN_TOPOLOGY_ID = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_UUID, context_id=ADMIN_CONTEXT_ID))
+
+class TopologyAbstractor(threading.Thread):
+    def __init__(self) -> None:
+        super().__init__(daemon=True)
+        self.terminate = threading.Event()
+
+        self.context_client = ContextClient()
+        self.dlt_connector_client = DltConnectorClient()
+        self.context_event_collector = EventsCollector(self.context_client)
+
+        self.real_to_abstract_device_uuid : Dict[str, str] = dict()
+        self.real_to_abstract_link_uuid : Dict[str, str] = dict()
+
+        self.abstract_device_to_topology_id : Dict[str, TopologyId] = dict()
+        self.abstract_link_to_topology_id : Dict[str, TopologyId] = dict()
+
+        self.abstract_devices : Dict[str, AbstractDevice] = dict()
+        self.abstract_links : Dict[Tuple[str,str], AbstractLink] = dict()
+
+    def stop(self):
+        self.terminate.set()
+
+    def run(self) -> None:
+        self.context_client.connect()
+        create_context(self.context_client, DEFAULT_CONTEXT_UUID)
+        topology_uuids = [DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID]
+        create_missing_topologies(self.context_client, ADMIN_CONTEXT_ID, topology_uuids)
+
+        self.dlt_connector_client.connect()
+        self.context_event_collector.start()
+
+        while not self.terminate.is_set():
+            event = self.context_event_collector.get_event(timeout=0.1)
+            if event is None: continue
+            #if self.ignore_event(event): continue
+            LOGGER.info('Processing Event({:s})...'.format(grpc_message_to_json_string(event)))
+            self.update_abstraction(event)
+
+        self.context_event_collector.stop()
+        self.context_client.close()
+        self.dlt_connector_client.close()
+
+    #def ignore_event(self, event : EventTypes) -> bool:
+    #    # TODO: filter events resulting from abstraction computation
+    #    # TODO: filter events resulting from updating remote abstractions
+    #    if self.own_context_id is None: return False
+    #    own_context_uuid = self.own_context_id.context_uuid.uuid
+    #
+    #    if isinstance(event, ContextEvent):
+    #        context_uuid = event.context_id.context_uuid.uuid
+    #        return context_uuid == own_context_uuid
+    #    elif isinstance(event, TopologyEvent):
+    #        context_uuid = event.topology_id.context_id.context_uuid.uuid
+    #        if context_uuid != own_context_uuid: return True
+    #        topology_uuid = event.topology_id.topology_uuid.uuid
+    #        if topology_uuid in {INTERDOMAIN_TOPOLOGY_UUID}: return True
+    #
+    #    return False
+
+    def _get_or_create_abstract_device(
+        self, device_uuid : str, device_type : DeviceTypeEnum, dlt_record_sender : DltRecordSender,
+        abstract_topology_id : TopologyId
+    ) -> AbstractDevice:
+        abstract_device = self.abstract_devices.get(device_uuid)
+        changed = False
+        if abstract_device is None:
+            abstract_device = AbstractDevice(device_uuid, device_type)
+            changed = abstract_device.initialize()
+            if changed: dlt_record_sender.add_device(abstract_topology_id, abstract_device.device)
+            self.abstract_devices[device_uuid] = abstract_device
+            self.abstract_device_to_topology_id[device_uuid] = abstract_topology_id
+        return abstract_device
+
+    def _update_abstract_device(
+        self, device : Device, dlt_record_sender : DltRecordSender, abstract_topology_id : TopologyId,
+        abstract_device_uuid : Optional[str] = None
+    ) -> None:
+        device_uuid = device.device_id.device_uuid.uuid
+        if device_type_is_datacenter(device.device_type):
+            abstract_device_uuid = device_uuid
+            abstract_device = self._get_or_create_abstract_device(
+                device_uuid, DeviceTypeEnum.EMULATED_DATACENTER, dlt_record_sender, abstract_topology_id)
+        elif device_type_is_network(device.device_type):
+            LOGGER.warning('device_type is network; not implemented')
+            return
+        else:
+            abstract_device = self._get_or_create_abstract_device(
+                abstract_device_uuid, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id)
+        self.real_to_abstract_device_uuid[device_uuid] = abstract_device_uuid
+        changed = abstract_device.update_endpoints(device)
+        if changed: dlt_record_sender.add_device(abstract_topology_id, abstract_device.device)
+
+    def _get_or_create_abstract_link(
+        self, link_uuid : str, dlt_record_sender : DltRecordSender, abstract_topology_id : TopologyId
+    ) -> AbstractLink:
+        abstract_link = self.abstract_links.get(link_uuid)
+        changed = False
+        if abstract_link is None:
+            abstract_link = AbstractLink(link_uuid)
+            changed = abstract_link.initialize()
+            if changed: dlt_record_sender.add_link(abstract_topology_id, abstract_link.link)
+            self.abstract_links[link_uuid] = abstract_link
+            self.abstract_link_to_topology_id[link_uuid] = abstract_topology_id
+        return abstract_link
+
+    def _get_link_endpoint_data(self, endpoint_id : EndPointId) -> Optional[Tuple[AbstractDevice, EndPoint]]:
+        device_uuid : str = endpoint_id.device_id.device_uuid.uuid
+        endpoint_uuid : str = endpoint_id.endpoint_uuid.uuid
+        abstract_device_uuid = self.real_to_abstract_device_uuid.get(device_uuid)
+        if abstract_device_uuid is None: return None
+        abstract_device = self.abstract_devices.get(abstract_device_uuid)
+        if abstract_device is None: return None
+        endpoint = abstract_device.get_endpoint(device_uuid, endpoint_uuid)
+        if endpoint is None: return None
+        return abstract_device, endpoint
+
+    def _compute_abstract_link(self, link : Link) -> Optional[Tuple[str, List[Tuple[str, str]]]]:
+        if len(link.link_endpoint_ids) != 2: return None
+
+        link_endpoint_data_A = self._get_link_endpoint_data(link.link_endpoint_ids[0])
+        if link_endpoint_data_A is None: return None
+        abstract_device_A, endpoint_A = link_endpoint_data_A
+        if not endpoint_type_is_border(endpoint_A.endpoint_type): return None
+
+        link_endpoint_data_Z = self._get_link_endpoint_data(link.link_endpoint_ids[-1])
+        if link_endpoint_data_Z is None: return None
+        abstract_device_Z, endpoint_Z = link_endpoint_data_Z
+        if not endpoint_type_is_border(endpoint_Z.endpoint_type): return None
+
+        link_uuid = AbstractLink.compose_uuid(
+            abstract_device_A.uuid, endpoint_A.endpoint_id.endpoint_uuid.uuid,
+            abstract_device_Z.uuid, endpoint_Z.endpoint_id.endpoint_uuid.uuid
+        )
+
+        # sort endpoints lexicographically to prevent duplicates
+        link_endpoint_uuids = sorted([
+            (abstract_device_A.uuid, endpoint_A.endpoint_id.endpoint_uuid.uuid),
+            (abstract_device_Z.uuid, endpoint_Z.endpoint_id.endpoint_uuid.uuid)
+        ])
+
+        return link_uuid, link_endpoint_uuids
+
+    def _update_abstract_link(
+        self, link : Link, dlt_record_sender : DltRecordSender, abstract_topology_id : TopologyId
+    ) -> None:
+        abstract_link_specs = self._compute_abstract_link(link)
+        if abstract_link_specs is None: return
+        abstract_link_uuid, link_endpoint_uuids = abstract_link_specs
+
+        abstract_link = self._get_or_create_abstract_link(abstract_link_uuid, dlt_record_sender, abstract_topology_id)
+        link_uuid = link.link_id.link_uuid.uuid
+        self.real_to_abstract_link_uuid[link_uuid] = abstract_link_uuid
+        changed = abstract_link.update_endpoints(link_endpoint_uuids)
+        if changed: dlt_record_sender.add_link(abstract_topology_id, abstract_link.link)
+
+    def _infer_abstract_links(self, device : Device, dlt_record_sender : DltRecordSender) -> None:
+        device_uuid = device.device_id.device_uuid.uuid
+
+        interdomain_device_uuids = get_uuids_of_devices_in_topology(
+            self.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID)
+
+        for endpoint in device.device_endpoints:
+            if not endpoint_type_is_border(endpoint.endpoint_type): continue
+            endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+
+            abstract_link_uuid = AbstractLink.compose_uuid(device_uuid, endpoint_uuid, endpoint_uuid, device_uuid)
+            if abstract_link_uuid in self.abstract_links: continue
+
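+            # Convention relied upon here: a border endpoint's UUID equals the UUID of
+            # the remote device it attaches to, so the endpoint name can be used to look
+            # up the peer device and check for the reciprocal border endpoint.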
+            if endpoint_uuid not in interdomain_device_uuids: continue
+            remote_device = self.context_client.GetDevice(DeviceId(**json_device_id(endpoint_uuid)))
+            remote_device_border_endpoint_uuids = {
+                endpoint.endpoint_id.endpoint_uuid.uuid : endpoint.endpoint_type
+                for endpoint in remote_device.device_endpoints
+                if endpoint_type_is_border(endpoint.endpoint_type)
+            }
+            if device_uuid not in remote_device_border_endpoint_uuids: continue
+
+            link_endpoint_uuids = sorted([(device_uuid, endpoint_uuid), (endpoint_uuid, device_uuid)])
+
+            abstract_link = self._get_or_create_abstract_link(
+                abstract_link_uuid, dlt_record_sender, INTERDOMAIN_TOPOLOGY_ID)
+            changed = abstract_link.update_endpoints(link_endpoint_uuids)
+            if changed: dlt_record_sender.add_link(INTERDOMAIN_TOPOLOGY_ID, abstract_link.link)
+
+    def update_abstraction(self, event : EventTypes) -> None:
+        dlt_record_sender = DltRecordSender(self.context_client, self.dlt_connector_client)
+
+        if isinstance(event, ContextEvent):
+            LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event)))
+
+        elif isinstance(event, TopologyEvent):
+            topology_id = event.topology_id
+            topology_uuid = topology_id.topology_uuid.uuid
+            context_id = topology_id.context_id
+            context_uuid = context_id.context_uuid.uuid
+            topology_uuids = {DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID}
+            if (context_uuid == DEFAULT_CONTEXT_UUID) and (topology_uuid not in topology_uuids):
+                abstract_topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=ADMIN_CONTEXT_ID))
+                self._get_or_create_abstract_device(
+                    topology_uuid, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id)
+
+                devices = get_devices_in_topology(self.context_client, context_id, topology_uuid)
+                for device in devices:
+                    self._update_abstract_device(
+                        device, dlt_record_sender, abstract_topology_id, abstract_device_uuid=topology_uuid)
+
+                links = get_links_in_topology(self.context_client, context_id, topology_uuid)
+                for link in links:
+                    self._update_abstract_link(link, dlt_record_sender, abstract_topology_id)
+
+                for device in devices:
+                    self._infer_abstract_links(device, dlt_record_sender)
+
+            else:
+                LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event)))
+
+        elif isinstance(event, DeviceEvent):
+            device_id = event.device_id
+            device_uuid = device_id.device_uuid.uuid
+            abstract_device_uuid = self.real_to_abstract_device_uuid.get(device_uuid)
+            device = self.context_client.GetDevice(device_id)
+            if abstract_device_uuid is None:
+                LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event)))
+            else:
+                abstract_topology_id = self.abstract_device_to_topology_id[abstract_device_uuid]
+                self._update_abstract_device(
+                    device, dlt_record_sender, abstract_topology_id, abstract_device_uuid=abstract_device_uuid)
+
+            self._infer_abstract_links(device, dlt_record_sender)
+
+        elif isinstance(event, LinkEvent):
+            link_id = event.link_id
+            link_uuid = link_id.link_uuid.uuid
+            abstract_link_uuid = self.real_to_abstract_link_uuid.get(link_uuid)
+            if abstract_link_uuid is None:
+                LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event)))
+            else:
+                abstract_topology_id = self.abstract_link_to_topology_id[abstract_link_uuid]
+                link = self.context_client.GetLink(link_id)
+                self._update_abstract_link(link, dlt_record_sender, abstract_topology_id)
+
+        else:
+            LOGGER.warning('Unsupported Event({:s})'.format(grpc_message_to_json_string(event)))
+
+        dlt_record_sender.commit()
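+
+# Usage sketch (lifecycle only; assumes the Context and DLT connector gRPC
+# services are reachable from this process):
+#   abstractor = TopologyAbstractor()
+#   abstractor.start()   # daemon thread; consumes context events and updates abstractions
+#   ...
+#   abstractor.stop()    # sets the terminate event; run() exits its loop and closes the clients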
diff --git a/src/interdomain/service/topology_abstractor/Types.py b/src/interdomain/service/topology_abstractor/Types.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6a0fa7a1d7a564045b6e850c2b46cf313da52b7
--- /dev/null
+++ b/src/interdomain/service/topology_abstractor/Types.py
@@ -0,0 +1,25 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union
+from common.proto.context_pb2 import (
+    ConnectionEvent, ContextEvent, Device, DeviceEvent, DeviceId, Link, LinkEvent, LinkId, Service, ServiceEvent,
+    ServiceId, Slice, SliceEvent, SliceId, TopologyEvent)
+
+DltRecordIdTypes = Union[DeviceId, LinkId, SliceId, ServiceId]
+DltRecordTypes = Union[Device, Link, Slice, Service]
+
+EventTypes = Union[
+    ContextEvent, TopologyEvent, DeviceEvent, LinkEvent, ServiceEvent, SliceEvent, ConnectionEvent
+]
diff --git a/src/interdomain/service/topology_abstractor/__init__.py b/src/interdomain/service/topology_abstractor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/interdomain/service/topology_abstractor/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
index 1d55646abffcdb4a882167406ba046aca7bfa651..205306d0ec2d156a2050d1f95c5c1e990796e018 100644
--- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
+++ b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
@@ -13,11 +13,16 @@
 # limitations under the License.
 
 import grpc, logging
-from common.proto.context_pb2 import Empty
+from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.proto.context_pb2 import ContextId, Empty
 from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
 from common.proto.pathcomp_pb2_grpc import PathCompServiceServicer
 from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
+from common.tools.context_queries.Device import get_devices_in_topology
+from common.tools.context_queries.Link import get_links_in_topology
+from common.tools.context_queries.InterDomain import is_inter_domain
 from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from pathcomp.frontend.service.algorithms.Factory import get_algorithm
 
@@ -27,6 +32,8 @@ SERVICE_NAME = 'PathComp'
 METHOD_NAMES = ['Compute']
 METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
 
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+
 class PathCompServiceServicerImpl(PathCompServiceServicer):
     def __init__(self) -> None:
         LOGGER.debug('Creating Servicer...')
@@ -38,11 +45,18 @@ class PathCompServiceServicerImpl(PathCompServiceServicer):
 
         context_client = ContextClient()
 
-        # TODO: add filtering of devices and links
-        # TODO: add contexts, topologies, and membership of devices/links in topologies
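+        # For a single inter-domain service, restrict path computation to the devices
+        # and links of the abstracted inter-domain topology; otherwise, fall back to
+        # listing all devices and links.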
+        if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids):
+            devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID)
+            links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID)
+        else:
+            # TODO: improve filtering of devices and links
+            # TODO: add contexts, topologies, and membership of devices/links in topologies
+            devices = context_client.ListDevices(Empty())
+            links = context_client.ListLinks(Empty())
+
         algorithm = get_algorithm(request)
-        algorithm.add_devices(context_client.ListDevices(Empty()))
-        algorithm.add_links(context_client.ListLinks(Empty()))
+        algorithm.add_devices(devices)
+        algorithm.add_links(links)
         algorithm.add_service_requests(request)
 
         #LOGGER.debug('device_list = {:s}'  .format(str(algorithm.device_list  )))
diff --git a/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py
index d5f937fd207807ba650669ea9fb2395b2e21b164..e0a2441823627843f1e14bde905da4f82ed7a593 100644
--- a/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py
@@ -12,15 +12,42 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.proto.pathcomp_pb2 import Algorithm_ShortestPath
+from typing import Dict, Optional
+from common.proto.pathcomp_pb2 import Algorithm_ShortestPath, PathCompRequest
 from ._Algorithm import _Algorithm
 
 class ShortestPathAlgorithm(_Algorithm):
     def __init__(self, algorithm : Algorithm_ShortestPath, class_name=__name__) -> None:
         super().__init__('SP', False, class_name=class_name)
 
-    def add_service_requests(self, requested_services) -> None:
-        super().add_service_requests(requested_services)
+    def add_service_requests(self, request : PathCompRequest) -> None:
+        super().add_service_requests(request)
         for service_request in self.service_list:
             service_request['algId'    ] = self.algorithm_id
             service_request['syncPaths'] = self.sync_paths
+
+    def _single_device_request(self) -> Optional[Dict]:
+        if len(self.service_list) != 1: return None
+        service = self.service_list[0]
+        endpoint_ids = service['service_endpoints_ids']
+        if len(endpoint_ids) != 2: return None
+        if endpoint_ids[0]['device_id'] != endpoint_ids[-1]['device_id']: return None
+        return {'response-list': [{
+            'serviceId': service['serviceId'],
+            'service_endpoints_ids': [endpoint_ids[0], endpoint_ids[-1]],
+            'path': [{
+                # not used for now
+                #'path-capacity': {'total-size': {'value': 200, 'unit': 0}},
+                #'path-latency': {'fixed-latency-characteristic': '2.000000'},
+                #'path-cost': {'cost-name': '', 'cost-value': '1.000000', 'cost-algorithm': '0.000000'},
+                'devices': [endpoint_ids[0], endpoint_ids[-1]]
+            }]
+        }]}
+
+    def execute(self, dump_request_filename : Optional[str] = None, dump_reply_filename : Optional[str] = None) -> None:
+        # if the request consists of a single service within a single device (not
+        # supported by the backend), produce a synthetic reply directly
+        self.json_reply = self._single_device_request()
+        if self.json_reply is None:
+            # otherwise, follow normal logic through the backend
+            return super().execute(dump_request_filename, dump_reply_filename)
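+
+    # Note: when _single_device_request() yields a synthetic reply, the backend is
+    # never contacted; super().execute() runs only for multi-device requests.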
diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
index 43811c0687fa64206cf4491750411f0aa2994ac6..3833642457bc5f8c2ba7b7d09f384a87dfabe41d 100644
--- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import json, logging, requests
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List, Optional, Tuple, Union
 from common.proto.context_pb2 import (
     ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum,
     ServiceTypeEnum)
@@ -23,7 +23,8 @@ from pathcomp.frontend.Config import BACKEND_URL
 from pathcomp.frontend.service.algorithms.tools.ConstantsMappings import DEVICE_LAYER_TO_SERVICE_TYPE, DeviceLayerEnum
 from .tools.EroPathToHops import eropath_to_hops
 from .tools.ComposeRequest import compose_device, compose_link, compose_service
-from .tools.ComputeSubServices import convert_explicit_path_hops_to_connections
+from .tools.ComputeSubServices import (
+    convert_explicit_path_hops_to_connections, convert_explicit_path_hops_to_plain_connection)
 
 class _Algorithm:
     def __init__(self, algorithm_id : str, sync_paths : bool, class_name=__name__) -> None:
@@ -46,8 +47,9 @@ class _Algorithm:
         self.service_list : List[Dict] = list()
         self.service_dict : Dict[Tuple[str, str], Tuple[Dict, Service]] = dict()
 
-    def add_devices(self, grpc_devices : DeviceList) -> None:
-        for grpc_device in grpc_devices.devices:
+    def add_devices(self, grpc_devices : Union[List[Device], DeviceList]) -> None:
+        if isinstance(grpc_devices, DeviceList): grpc_devices = grpc_devices.devices
+        for grpc_device in grpc_devices:
             json_device = compose_device(grpc_device)
             self.device_list.append(json_device)
 
@@ -62,8 +64,9 @@ class _Algorithm:
 
             self.endpoint_dict[device_uuid] = device_endpoint_dict
 
-    def add_links(self, grpc_links : LinkList) -> None:
-        for grpc_link in grpc_links.links:
+    def add_links(self, grpc_links : Union[List[Link], LinkList]) -> None:
+        if isinstance(grpc_links, LinkList): grpc_links = grpc_links.links
+        for grpc_link in grpc_links:
             json_link = compose_link(grpc_link)
             self.link_list.append(json_link)
 
@@ -206,7 +209,12 @@ class _Algorithm:
 
             for service_path_ero in response['path']:
                 path_hops = eropath_to_hops(service_path_ero['devices'], self.endpoint_to_link_dict)
-                connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid)
+                try:
+                    connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid)
+                except: # pylint: disable=bare-except
+                    # if unable to decompose the path into sub-services and sub-connections,
+                    # assume a single service and a single connection
+                    connections = convert_explicit_path_hops_to_plain_connection(path_hops, service_uuid)
 
                 for connection in connections:
                     connection_uuid,device_layer,path_hops,_ = connection
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
index c1977cedb9b341fbb767a5fb8c829cd5f633884c..17a7e74ef573e4926d53045ab8888c71a3dd73d7 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
@@ -34,9 +34,11 @@ def compose_topology_id(topology_id : TopologyId) -> Dict:
     return {'contextId': context_uuid, 'topology_uuid': topology_uuid}
 
 def compose_service_id(service_id : ServiceId) -> Dict:
-    context_uuid = service_id.context_id.context_uuid.uuid
-
-    if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_UUID
+    # force context_uuid to always be DEFAULT_CONTEXT_UUID for simplicity;
+    # in inter-domain scenarios, contexts are managed in a particular way
+    #context_uuid = service_id.context_id.context_uuid.uuid
+    #if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_UUID
+    context_uuid = DEFAULT_CONTEXT_UUID
 
     service_uuid = service_id.service_uuid.uuid
     return {'contextId': context_uuid, 'service_uuid': service_uuid}
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py
index f2c66cb24ca3c15c71f22dbe4eeca634e18d985a..7c7b62e2d039d2e6bad979b3601e09ca1c54ea51 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py
@@ -94,3 +94,19 @@ def convert_explicit_path_hops_to_connections(
     connections.append(connection_stack.get())
     assert connection_stack.empty()
     return connections
+
+def convert_explicit_path_hops_to_plain_connection(
+    path_hops : List[Dict], main_connection_uuid : str
+) -> List[Tuple[str, DeviceLayerEnum, List[str], List[str]]]:
+
+    connection : Tuple[str, DeviceLayerEnum, List[str], List[str]] = \
+        (main_connection_uuid, DeviceLayerEnum.PACKET_DEVICE, [], [])
+
+    last_device_uuid = None
+    for path_hop in path_hops:
+        device_uuid = path_hop['device']
+        if last_device_uuid == device_uuid: continue
+        connection[2].append(path_hop)
+        last_device_uuid = device_uuid
+
+    return [connection]
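+
+# Illustrative result shape (hypothetical two-device path; consecutive hops on the
+# same device are collapsed):
+#   convert_explicit_path_hops_to_plain_connection([hop_r1_in, hop_r1_out, hop_r2], 'svc-1')
+#   -> [('svc-1', DeviceLayerEnum.PACKET_DEVICE, [hop_r1_in, hop_r2], [])]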
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
index 7d61e1ef8f78950ec6f9bd0878de136d4a01b554..56e11b1b4a0293bcdbed2f1d3cd7c08814d7b161 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
@@ -80,6 +80,7 @@ class DeviceLayerEnum(IntEnum):
 DEVICE_TYPE_TO_LAYER = {
     DeviceTypeEnum.EMULATED_DATACENTER.value             : DeviceLayerEnum.APPLICATION_DEVICE,
     DeviceTypeEnum.DATACENTER.value                      : DeviceLayerEnum.APPLICATION_DEVICE,
+    DeviceTypeEnum.NETWORK.value                         : DeviceLayerEnum.APPLICATION_DEVICE,
 
     DeviceTypeEnum.EMULATED_PACKET_ROUTER.value          : DeviceLayerEnum.PACKET_DEVICE,
     DeviceTypeEnum.PACKET_ROUTER.value                   : DeviceLayerEnum.PACKET_DEVICE,
diff --git a/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py b/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py
index 021940937c23a7cb461a603aa32a15f16626eb1d..a885ddb29c3fa70d6bccea18f43fef5b038aae68 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py
@@ -42,35 +42,43 @@
 # ]
 #
 
+import logging
 from typing import Dict, List
 
+LOGGER = logging.getLogger(__name__)
+
 def eropath_to_hops(ero_path : List[Dict], endpoint_to_link_dict : Dict) -> List[Dict]:
-    path_hops = []
-    for endpoint in ero_path:
-        device_uuid = endpoint['device_id']
-        endpoint_uuid = endpoint['endpoint_uuid']
+    try:
+        path_hops = []
+        for endpoint in ero_path:
+            device_uuid = endpoint['device_id']
+            endpoint_uuid = endpoint['endpoint_uuid']
 
-        if len(path_hops) == 0:
-            path_hops.append({'device': device_uuid, 'ingress_ep': endpoint_uuid})
-            continue
+            if len(path_hops) == 0:
+                path_hops.append({'device': device_uuid, 'ingress_ep': endpoint_uuid})
+                continue
 
-        last_hop = path_hops[-1]
-        if (last_hop['device'] == device_uuid):
-            if ('ingress_ep' not in last_hop) or ('egress_ep' in last_hop): continue
-            last_hop['egress_ep'] = endpoint_uuid
-            continue
+            last_hop = path_hops[-1]
+            if (last_hop['device'] == device_uuid):
+                if ('ingress_ep' not in last_hop) or ('egress_ep' in last_hop): continue
+                last_hop['egress_ep'] = endpoint_uuid
+                continue
 
-        endpoint_key = (last_hop['device'], last_hop['egress_ep'])
-        link_tuple = endpoint_to_link_dict.get(endpoint_key)
-        ingress = next(iter([
-            ep_id for ep_id in link_tuple[0]['link_endpoint_ids']
-            if (ep_id['endpoint_id']['device_id'] == device_uuid) and\
-                (ep_id['endpoint_id']['endpoint_uuid'] != endpoint_uuid)
-        ]), None)
-        if ingress['endpoint_id']['device_id'] != device_uuid: raise Exception('Malformed path')
-        path_hops.append({
-            'device': ingress['endpoint_id']['device_id'],
-            'ingress_ep': ingress['endpoint_id']['endpoint_uuid'],
-            'egress_ep': endpoint_uuid,
-        })
-    return path_hops
+            endpoint_key = (last_hop['device'], last_hop['egress_ep'])
+            link_tuple = endpoint_to_link_dict.get(endpoint_key)
+            ingress = next(iter([
+                ep_id for ep_id in link_tuple[0]['link_endpoint_ids']
+                if (ep_id['endpoint_id']['device_id'] == device_uuid) and\
+                    (ep_id['endpoint_id']['endpoint_uuid'] != endpoint_uuid)
+            ]), None)
+            if ingress['endpoint_id']['device_id'] != device_uuid: raise Exception('Malformed path')
+            path_hops.append({
+                'device': ingress['endpoint_id']['device_id'],
+                'ingress_ep': ingress['endpoint_id']['endpoint_uuid'],
+                'egress_ep': endpoint_uuid,
+            })
+        return path_hops
+    except: # pylint: disable=bare-except
+        LOGGER.exception('Unhandled exception: ero_path={:s} endpoint_to_link_dict={:s}'.format(
+            str(ero_path), str(endpoint_to_link_dict)))
+        raise
diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
index 18a5aea29eb7c025372d00828feb127336e90102..f12c9ab984205b9057dd1507114e5bc17d8deaa6 100644
--- a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
+++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
@@ -60,12 +60,13 @@ def setup_config_rules(
             {'name': network_instance_name, 'type': 'L2VSI'}),
 
         json_config_rule_set(
-            '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index),
+            '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index),
             {'name': if_cirid_name, 'type': 'l2vlan', 'index': sub_interface_index, 'vlan_id': vlan_id}),
 
         json_config_rule_set(
             '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
-            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}),
+            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
+            'subinterface': sub_interface_index}),
 
         json_config_rule_set(
             '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
@@ -107,10 +108,11 @@ def teardown_config_rules(
 
         json_config_rule_delete(
             '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
-            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}),
+            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
+            'subinterface': sub_interface_index}),
 
         json_config_rule_delete(
-            '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index),
+            '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index),
             {'name': if_cirid_name, 'index': sub_interface_index}),
 
         json_config_rule_delete(
diff --git a/src/slice/Dockerfile b/src/slice/Dockerfile
index 96a751d156edcaef38794ecfe5b409cbeb081e82..7dadc477f70667c827d4a9eb0ddd013c85b97344 100644
--- a/src/slice/Dockerfile
+++ b/src/slice/Dockerfile
@@ -64,6 +64,7 @@ RUN python3 -m pip install -r requirements.txt
 WORKDIR /var/teraflow
 COPY src/context/. context/
 COPY src/interdomain/. interdomain/
+COPY src/pathcomp/. pathcomp/
 COPY src/service/. service/
 COPY src/slice/. slice/
 
diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py
index 53875f0e6ae7c8e3e7d5ac9dad7501a2136844c4..ada7218588391766147a02f9713b540016522aa7 100644
--- a/src/slice/service/SliceServiceServicerImpl.py
+++ b/src/slice/service/SliceServiceServicerImpl.py
@@ -17,11 +17,12 @@ from common.proto.context_pb2 import (
     Empty, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Slice, SliceId, SliceStatusEnum)
 from common.proto.slice_pb2_grpc import SliceServiceServicer
 from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
+from common.tools.context_queries.InterDomain import is_multi_domain
 from common.tools.grpc.ConfigRules import copy_config_rules
 from common.tools.grpc.Constraints import copy_constraints
 from common.tools.grpc.EndPointIds import copy_endpoint_ids
 from common.tools.grpc.ServiceIds import update_service_ids
-from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from interdomain.client.InterdomainClient import InterdomainClient
 from service.client.ServiceClient import ServiceClient
@@ -42,103 +43,118 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         try:
             _slice = context_client.GetSlice(request.slice_id)
             #json_current_slice = grpc_message_to_json(_slice)
-        except:
+        except: # pylint: disable=bare-except
             #json_current_slice = {}
             slice_request = Slice()
-            slice_request.slice_id.CopyFrom(request.slice_id)
-            slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED
+            slice_request.slice_id.CopyFrom(request.slice_id) # pylint: disable=no-member
+            slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED # pylint: disable=no-member
             context_client.SetSlice(slice_request)
             _slice = context_client.GetSlice(request.slice_id)
+
         slice_request = Slice()
         slice_request.CopyFrom(_slice)
 
+        if len(request.slice_endpoint_ids) < 2:
+            # unable to identify the kind of slice; just update endpoints, constraints,
+            # and config rules, update the slice in the database, and return
+            # pylint: disable=no-member
+            copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids)
+            copy_constraints(request.slice_constraints, slice_request.slice_constraints)
+            copy_config_rules(request.slice_config.config_rules, slice_request.slice_config.config_rules)
+            return context_client.SetSlice(slice_request)
+
         #LOGGER.info('json_current_slice = {:s}'.format(str(json_current_slice)))
         #json_updated_slice = grpc_message_to_json(request)
         #LOGGER.info('json_updated_slice = {:s}'.format(str(json_updated_slice)))
         #changes = deepdiff.DeepDiff(json_current_slice, json_updated_slice)
         #LOGGER.info('changes = {:s}'.format(str(changes)))
 
-        domains = set()
-        for slice_endpoint_id in request.slice_endpoint_ids:
-            device_uuid = slice_endpoint_id.device_id.device_uuid.uuid
-            device_parts = device_uuid.split('@')
-            domain_uuid = '' if len(device_parts) == 1 else device_parts[1]
-            domains.add(domain_uuid)
-        LOGGER.info('domains = {:s}'.format(str(domains)))
-        is_multi_domain = len(domains) > 1
-        LOGGER.info('is_multi_domain = {:s}'.format(str(is_multi_domain)))
-
-        if is_multi_domain:
+        if is_multi_domain(context_client, request.slice_endpoint_ids):
             interdomain_client = InterdomainClient()
             slice_id = interdomain_client.RequestSlice(request)
-        else:
-            service_id = ServiceId()
-            context_uuid = service_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid
-            slice_uuid = service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid
-
-            service_client = ServiceClient()
-            try:
-                _service = context_client.GetService(service_id)
-            except:
-                service_request = Service()
-                service_request.service_id.CopyFrom(service_id)
-                service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN
-                service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
-                service_reply = service_client.CreateService(service_request)
-                if service_reply != service_request.service_id: # pylint: disable=no-member
-                    raise Exception('Service creation failed. Wrong Service Id was returned')
-                _service = context_client.GetService(service_id)
+            slice_ = context_client.GetSlice(slice_id)
+            slice_active = Slice()
+            slice_active.CopyFrom(slice_)
+            slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
+            context_client.SetSlice(slice_active)
+            return slice_id
+
+        # Local domain slice
+        service_id = ServiceId()
+        # pylint: disable=no-member
+        context_uuid = service_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid
+        slice_uuid = service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid
+
+        service_client = ServiceClient()
+        try:
+            _service = context_client.GetService(service_id)
+        except: # pylint: disable=bare-except
+            # pylint: disable=no-member
             service_request = Service()
-            service_request.CopyFrom(_service)
-
-            copy_endpoint_ids(request.slice_endpoint_ids, service_request.service_endpoint_ids)
-            copy_constraints(request.slice_constraints, service_request.service_constraints)
-            copy_config_rules(request.slice_config.config_rules, service_request.service_config.config_rules)
-
+            service_request.service_id.CopyFrom(service_id)
             service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN
-            for config_rule in request.slice_config.config_rules:
-                LOGGER.info('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule)))
-                config_rule_kind = config_rule.WhichOneof('config_rule')
-                LOGGER.info('config_rule_kind: {:s}'.format(str(config_rule_kind)))
-                if config_rule_kind != 'custom': continue
-                custom = config_rule.custom
-                resource_key = custom.resource_key
-                LOGGER.info('resource_key: {:s}'.format(str(resource_key)))
-
-                # TODO: parse resource key with regular expression, e.g.:
-                #    m = re.match('\/device\[[^\]]\]\/endpoint\[[^\]]\]\/settings', s)
-                if not resource_key.startswith('/device'): continue
-                if not resource_key.endswith('/settings'): continue
-
-                resource_value = json.loads(custom.resource_value)
-                LOGGER.info('resource_value: {:s}'.format(str(resource_value)))
-
-                if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN:
-                    if (resource_value.get('address_ip') is not None and \
-                        resource_value.get('address_prefix') is not None):
-                        service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM
-                        LOGGER.info('is L3')
-                    else:
-                        service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM
-                        LOGGER.info('is L2')
-                    break
-
-            service_reply = service_client.UpdateService(service_request)
+            service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
+            service_reply = service_client.CreateService(service_request)
             if service_reply != service_request.service_id: # pylint: disable=no-member
-                raise Exception('Service update failed. Wrong Service Id was returned')
-
-            copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids)
-            copy_constraints(request.slice_constraints, slice_request.slice_constraints)
-            copy_config_rules(request.slice_config.config_rules, slice_request.slice_config.config_rules)
-
-            update_service_ids(slice_request.slice_service_ids, context_uuid, service_uuid)
-            context_client.SetSlice(slice_request)
-            slice_id = slice_request.slice_id
+                # pylint: disable=raise-missing-from
+                raise Exception('Service creation failed. Wrong Service Id was returned')
+            _service = context_client.GetService(service_id)
+        service_request = Service()
+        service_request.CopyFrom(_service)
+
+        # pylint: disable=no-member
+        copy_endpoint_ids(request.slice_endpoint_ids, service_request.service_endpoint_ids)
+        copy_constraints(request.slice_constraints, service_request.service_constraints)
+        copy_config_rules(request.slice_config.config_rules, service_request.service_config.config_rules)
+
+        service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN
+        for config_rule in request.slice_config.config_rules:
+            LOGGER.info('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule)))
+            config_rule_kind = config_rule.WhichOneof('config_rule')
+            LOGGER.info('config_rule_kind: {:s}'.format(str(config_rule_kind)))
+            if config_rule_kind != 'custom': continue
+            custom = config_rule.custom
+            resource_key = custom.resource_key
+            LOGGER.info('resource_key: {:s}'.format(str(resource_key)))
+
+            # TODO: parse resource key with regular expression, e.g.:
+            #    m = re.match('\/device\[[^\]]\]\/endpoint\[[^\]]\]\/settings', s)
+            if not resource_key.startswith('/device'): continue
+            if not resource_key.endswith('/settings'): continue
+
+            resource_value = json.loads(custom.resource_value)
+            LOGGER.info('resource_value: {:s}'.format(str(resource_value)))
+
+            if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN:
+                if (resource_value.get('address_ip') is not None and \
+                    resource_value.get('address_prefix') is not None):
+                    service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM
+                    LOGGER.info('is L3')
+                else:
+                    service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM
+                    LOGGER.info('is L2')
+                break
+
+        if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN:
+            service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM
+            LOGGER.info('assume L2')
+
+        service_reply = service_client.UpdateService(service_request)
+        if service_reply != service_request.service_id: # pylint: disable=no-member
+            raise Exception('Service update failed. Wrong Service Id was returned')
+
+        copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids)
+        copy_constraints(request.slice_constraints, slice_request.slice_constraints)
+        copy_config_rules(request.slice_config.config_rules, slice_request.slice_config.config_rules)
+
+        update_service_ids(slice_request.slice_service_ids, context_uuid, service_uuid)
+        context_client.SetSlice(slice_request)
+        slice_id = slice_request.slice_id
 
         slice_ = context_client.GetSlice(slice_id)
         slice_active = Slice()
         slice_active.CopyFrom(slice_)
-        slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE
+        slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
         context_client.SetSlice(slice_active)
         return slice_id
 
@@ -175,21 +191,11 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         context_client = ContextClient()
         try:
             _slice = context_client.GetSlice(request)
-        except:
+        except: # pylint: disable=bare-except
             return Empty()
 
-        domains = set()
-        for slice_endpoint_id in _slice.slice_endpoint_ids:
-            device_uuid = slice_endpoint_id.device_id.device_uuid.uuid
-            device_parts = device_uuid.split('@')
-            domain_uuid = '' if len(device_parts) == 1 else device_parts[1]
-            domains.add(domain_uuid)
-        LOGGER.info('domains = {:s}'.format(str(domains)))
-        is_multi_domain = len(domains) > 1
-        LOGGER.info('is_multi_domain = {:s}'.format(str(is_multi_domain)))
-
-        if is_multi_domain:
-            interdomain_client = InterdomainClient()
+        if is_multi_domain(context_client, _slice.slice_endpoint_ids):
+            #interdomain_client = InterdomainClient()
             #slice_id = interdomain_client.DeleteSlice(request)
             raise NotImplementedError('Delete inter-domain slice')
         else:
diff --git a/src/slice/service/__main__.py b/src/slice/service/__main__.py
index a59c54b4b1b56865871d331409c1a7f60629aec6..b2f4536503ac176628c42cf0211315089697c50e 100644
--- a/src/slice/service/__main__.py
+++ b/src/slice/service/__main__.py
@@ -15,7 +15,9 @@
 import logging, signal, sys, threading
 from prometheus_client import start_http_server
 from common.Constants import ServiceNameEnum
-from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, wait_for_environment_variables
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+    wait_for_environment_variables)
 from .SliceService import SliceService
 
 terminate = threading.Event()
diff --git a/src/tests/nfvsdn22 b/src/tests/nfvsdn22
new file mode 120000
index 0000000000000000000000000000000000000000..e8122da56327bf631c751cbe38ce6b37d3dc7378
--- /dev/null
+++ b/src/tests/nfvsdn22
@@ -0,0 +1 @@
+./scenario2
\ No newline at end of file
diff --git a/src/tests/scenario2/.gitignore b/src/tests/scenario2/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0a3f4400d5c88b1af32c7667d69d2fdc12d5424e
--- /dev/null
+++ b/src/tests/scenario2/.gitignore
@@ -0,0 +1,2 @@
+# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc.
+descriptors_real.json
diff --git a/src/tests/scenario2/MultiIngressController.txt b/src/tests/scenario2/MultiIngressController.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b2d6d322465cb1d776b043e5de4dd474d2f0d9c6
--- /dev/null
+++ b/src/tests/scenario2/MultiIngressController.txt
@@ -0,0 +1,35 @@
+# Ref: https://kubernetes.github.io/ingress-nginx/user-guide/multiple-ingress/
+# Ref: https://fabianlee.org/2021/07/29/kubernetes-microk8s-with-multiple-metallb-endpoints-and-nginx-ingress-controllers/
+
+# Check node limits
+kubectl describe nodes
+
+# Create secondary ingress controllers
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml
+
+# Delete secondary ingress controllers
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml
+
+source nfvsdn22/deploy_specs_dom1.sh
+./deploy.sh
+
+source nfvsdn22/deploy_specs_dom2.sh
+./deploy.sh
+
+source nfvsdn22/deploy_specs_dom3.sh
+./deploy.sh
+
+source nfvsdn22/deploy_specs_dom4.sh
+./deploy.sh
+
+# Manually deploy ingresses for domains
+kubectl --namespace tfs-dom1 apply -f nfvsdn22/tfs-ingress-dom1.yaml
+kubectl --namespace tfs-dom2 apply -f nfvsdn22/tfs-ingress-dom2.yaml
+kubectl --namespace tfs-dom3 apply -f nfvsdn22/tfs-ingress-dom3.yaml
+kubectl --namespace tfs-dom4 apply -f nfvsdn22/tfs-ingress-dom4.yaml
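+
+# Optional sanity check (illustrative; pod names depend on the
+# nginx-ingress-controller-dom*.yaml manifests): confirm the controllers run and
+# their ingress classes are registered before applying the per-domain ingresses
+kubectl get pods --all-namespaces | grep nginx-ingress
+kubectl get ingressclasses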
diff --git a/src/tests/scenario2/Scenario.md b/src/tests/scenario2/Scenario.md
new file mode 100644
index 0000000000000000000000000000000000000000..8dad4691ade669522b5c82a5e4ed07e5d0279492
--- /dev/null
+++ b/src/tests/scenario2/Scenario.md
@@ -0,0 +1,47 @@
+# Scenario
+
+- 4 TFS instances
+
+    - domain D1 (source for e-2-e service)
+        5 routers + 1 DC
+        R1@D1/2 <--> R2@D1/1
+        R2@D1/3 <--> R3@D1/2
+        R2@D1/5 <--> R5@D1/2
+        R3@D1/4 <--> R4@D1/3
+        R4@D1/5 <--> R5@D1/4
+        R5@D1/1 <--> R1@D1/5
+        R1@D1/100 <--> DCGW@D1/eth1
+
+    - domain D2 (transit for e-2-e service)
+        6 routers
+        R1@D2/2 <--> R2@D2/1
+        R1@D2/6 <--> R6@D2/1
+        R1@D2/5 <--> R5@D2/1
+        R2@D2/3 <--> R3@D2/2
+        R2@D2/4 <--> R4@D2/2
+        R2@D2/5 <--> R5@D2/2
+        R2@D2/6 <--> R6@D2/2
+        R3@D2/6 <--> R6@D2/3
+        R4@D2/5 <--> R5@D2/4
+
+    - domain D3 (transit for e-2-e service)
+        4 routers
+        R1@D3/2 <--> R2@D3/1
+        R2@D3/3 <--> R3@D3/2
+        R3@D3/4 <--> R4@D3/3
+        R4@D3/1 <--> R1@D3/4
+        R2@D3/4 <--> R4@D3/2
+
+    - domain D4 (end for e-2-e service)
+        3 routers
+        R1@D4/2 <--> R2@D4/1
+        R1@D4/3 <--> R3@D4/1
+        R2@D4/3 <--> R3@D4/2
+        R3@D4/100 <--> DCGW@D4/eth1
+
+    - interdomain links
+        R4@D1/10 <--> R1@D2/10
+        R5@D1/10 <--> R1@D3/10
+        R4@D2/10 <--> R2@D4/10
+        R5@D2/10 <--> R2@D3/10
+        R3@D3/10 <--> R1@D4/10
diff --git a/src/tests/scenario2/__init__.py b/src/tests/scenario2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/tests/scenario2/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/scenario2/delete_all.sh b/src/tests/scenario2/delete_all.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5d3e55831c85a3ef547d8e02a29f507663bfa789
--- /dev/null
+++ b/src/tests/scenario2/delete_all.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Delete old namespaces
+kubectl delete namespace tfs-dom1 tfs-dom2 tfs-dom3 tfs-dom4 tfs-bchain
+
+# Delete secondary ingress controllers
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml
diff --git a/src/tests/scenario2/deploy_all.sh b/src/tests/scenario2/deploy_all.sh
new file mode 100755
index 0000000000000000000000000000000000000000..582a97ac57f624de93e5865b7dcb190a6797bd5b
--- /dev/null
+++ b/src/tests/scenario2/deploy_all.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Delete old namespaces
+kubectl delete namespace tfs-dom1 tfs-dom2 tfs-dom3 tfs-dom4
+
+# Delete secondary ingress controllers
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml
+
+# Delete MockBlockchain
+#kubectl delete namespace tfs-bchain
+
+# Create secondary ingress controllers
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml
+
+# Create MockBlockchain
+#./deploy_mock_blockchain.sh
+
+# Deploy TFS for Domain 1
+source nfvsdn22/deploy_specs_dom1.sh
+./deploy.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom1.sh
+
+# Deploy TFS for Domain 2
+source nfvsdn22/deploy_specs_dom2.sh
+./deploy.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom2.sh
+
+# Deploy TFS for Domain 3
+source nfvsdn22/deploy_specs_dom3.sh
+./deploy.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom3.sh
+
+# Deploy TFS for Domain 4
+source nfvsdn22/deploy_specs_dom4.sh
+./deploy.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom4.sh
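The four per-domain blocks above differ only in the domain number. For reference, a hedged Python equivalent of the same sequence (a sketch; each spec file must be sourced by the same shell that runs deploy.sh, hence bash -c):

    import subprocess

    # Sketch: deploy the four TFS instances in a loop. Each deploy_specs_domN.sh is
    # sourced in the bash process that invokes deploy.sh, mirroring the script above.
    for dom in range(1, 5):
        subprocess.run(
            ['bash', '-c',
             f'source nfvsdn22/deploy_specs_dom{dom}.sh && ./deploy.sh && '
             f'mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom{dom}.sh'],
            check=True)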
diff --git a/src/tests/scenario2/deploy_specs_dom1.sh b/src/tests/scenario2/deploy_specs_dom1.sh
new file mode 100644
index 0000000000000000000000000000000000000000..06d32e005f36d883c44d195ccfd20ec9b7e9a4b8
--- /dev/null
+++ b/src/tests/scenario2/deploy_specs_dom1.sh
@@ -0,0 +1,21 @@
+# Set the URL of your local Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs-dom1"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom1.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Set the skip-build flag. If TFS_SKIP_BUILD is "YES", the containers are not
+# rebuilt-retagged-repushed. Domain 1 builds and pushes the images for all domains.
+export TFS_SKIP_BUILD="NO"
diff --git a/src/tests/scenario2/deploy_specs_dom2.sh b/src/tests/scenario2/deploy_specs_dom2.sh
new file mode 100644
index 0000000000000000000000000000000000000000..df1726cd31606ada5d2a33d50550b52c02ccbee4
--- /dev/null
+++ b/src/tests/scenario2/deploy_specs_dom2.sh
@@ -0,0 +1,21 @@
+# Set the URL of your local Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs-dom2"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom2.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Set the skip-build flag. With TFS_SKIP_BUILD="YES", the containers are not
+# rebuilt-retagged-repushed; the images built during the Domain 1 deployment are reused.
+export TFS_SKIP_BUILD="YES"
diff --git a/src/tests/scenario2/deploy_specs_dom3.sh b/src/tests/scenario2/deploy_specs_dom3.sh
new file mode 100644
index 0000000000000000000000000000000000000000..027762e3e70d0d1cd76b8d3303ae17c97ea781c7
--- /dev/null
+++ b/src/tests/scenario2/deploy_specs_dom3.sh
@@ -0,0 +1,21 @@
+# Set the URL of your local Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs-dom3"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom3.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Set the skip-build flag. With TFS_SKIP_BUILD="YES", the containers are not
+# rebuilt-retagged-repushed; the images built during the Domain 1 deployment are reused.
+export TFS_SKIP_BUILD="YES"
diff --git a/src/tests/scenario2/deploy_specs_dom4.sh b/src/tests/scenario2/deploy_specs_dom4.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a09e9fa899a0ca9fc941fd09496113a20aebbe59
--- /dev/null
+++ b/src/tests/scenario2/deploy_specs_dom4.sh
@@ -0,0 +1,21 @@
+# Set the URL of your local Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs-dom4"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom4.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Set the skip-build flag. With TFS_SKIP_BUILD="YES", the containers are not
+# rebuilt-retagged-repushed; the images built during the Domain 1 deployment are reused.
+export TFS_SKIP_BUILD="YES"
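The four spec files are identical except for the namespace, the ingress manifest, and the skip-build flag: Domain 1 builds and pushes the images, so it must be deployed first, and Domains 2-4 reuse them. A small generator sketch (a hypothetical helper, not part of the scenario) makes the shared settings explicit:

    # Sketch: regenerate the four deploy_specs_dom*.sh files from one template so
    # the shared settings live in a single place (run from src/tests/scenario2).
    LINES = [
        'export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"',
        'export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"',
        'export TFS_IMAGE_TAG="dev"',
        'export TFS_K8S_NAMESPACE="tfs-dom{n}"',
        'export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom{n}.yaml"',
        'export TFS_GRAFANA_PASSWORD="admin123+"',
        'export TFS_SKIP_BUILD="{skip}"',
    ]

    for n in range(1, 5):
        # Only Domain 1 builds the images; the other domains reuse them.
        body = '\n'.join(LINES).format(n=n, skip='NO' if n == 1 else 'YES') + '\n'
        with open(f'deploy_specs_dom{n}.sh', 'w') as f:
            f.write(body)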
diff --git a/src/tests/scenario2/descriptors/domain1.json b/src/tests/scenario2/descriptors/domain1.json
new file mode 100644
index 0000000000000000000000000000000000000000..043b3955f017631203a437cf853c3617cddf93c8
--- /dev/null
+++ b/src/tests/scenario2/descriptors/domain1.json
@@ -0,0 +1,148 @@
+{
+    "contexts": [
+        {
+            "context_id": {"context_uuid": {"uuid": "admin"}},
+            "topology_ids": [
+                {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}
+            ], "service_ids": []
+        }
+    ],
+    "topologies": [
+        {
+            "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}},
+            "device_ids": [
+                {"device_uuid": {"uuid": "DC1"}},
+                {"device_uuid": {"uuid": "R1@D1"}},
+                {"device_uuid": {"uuid": "R2@D1"}},
+                {"device_uuid": {"uuid": "R3@D1"}},
+                {"device_uuid": {"uuid": "R4@D1"}},
+                {"device_uuid": {"uuid": "R5@D1"}}
+            ], "link_ids": [
+                {"link_uuid": {"uuid": "DC1/D1==R1@D1/DC1"}},
+                {"link_uuid": {"uuid": "R1@D1/2==R2@D1/1"}},
+                {"link_uuid": {"uuid": "R2@D1/3==R3@D1/2"}},
+                {"link_uuid": {"uuid": "R2@D1/5==R5@D1/2"}},
+                {"link_uuid": {"uuid": "R3@D1/4==R4@D1/3"}},
+                {"link_uuid": {"uuid": "R4@D1/5==R5@D1/4"}},
+                {"link_uuid": {"uuid": "R5@D1/1==R1@D1/5"}}
+            ]
+        }
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/border", "uuid": "D1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R1@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "5"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "DC1"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R2@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "3"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "5"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R3@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R4@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "3"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "5"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "D2"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R5@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "4"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "D3"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "DC1/D1==R1@D1/DC1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "D1"}},
+                {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "DC1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R1@D1/2==R2@D1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "2"}},
+                {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R2@D1/3==R3@D1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "3"}},
+                {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R2@D1/5==R5@D1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "5"}},
+                {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R3@D1/4==R4@D1/3"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "4"}},
+                {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "3"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R4@D1/5==R5@D1/4"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "5"}},
+                {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "4"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R5@D1/1==R1@D1/5"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "1"}},
+                {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "5"}}
+            ]
+        }
+    ]
+}
diff --git a/src/tests/scenario2/descriptors/domain2.json b/src/tests/scenario2/descriptors/domain2.json
new file mode 100644
index 0000000000000000000000000000000000000000..81d397abfd3571b1177a06172188b00eed2f3afc
--- /dev/null
+++ b/src/tests/scenario2/descriptors/domain2.json
@@ -0,0 +1,166 @@
+{
+    "contexts": [
+        {
+            "context_id": {"context_uuid": {"uuid": "admin"}},
+            "topology_ids": [
+                {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}
+            ], "service_ids": []
+        }
+    ],
+    "topologies": [
+        {
+            "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}},
+            "device_ids": [
+                {"device_uuid": {"uuid": "R1@D2"}},
+                {"device_uuid": {"uuid": "R2@D2"}},
+                {"device_uuid": {"uuid": "R3@D2"}},
+                {"device_uuid": {"uuid": "R4@D2"}},
+                {"device_uuid": {"uuid": "R5@D2"}},
+                {"device_uuid": {"uuid": "R6@D2"}}
+            ], "link_ids": [
+                {"link_uuid": {"uuid": "R1@D2/2==R2@D2/1"}},
+                {"link_uuid": {"uuid": "R1@D2/6==R6@D2/1"}},
+                {"link_uuid": {"uuid": "R1@D2/5==R5@D2/1"}},
+                {"link_uuid": {"uuid": "R2@D2/3==R3@D2/2"}},
+                {"link_uuid": {"uuid": "R2@D2/4==R4@D2/2"}},
+                {"link_uuid": {"uuid": "R2@D2/5==R5@D2/2"}},
+                {"link_uuid": {"uuid": "R2@D2/6==R6@D2/2"}},
+                {"link_uuid": {"uuid": "R3@D2/6==R6@D2/3"}},
+                {"link_uuid": {"uuid": "R4@D2/5==R5@D2/4"}}
+            ]
+        }
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "R1@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "5"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "6"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "D1"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R2@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "3"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "4"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "5"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "6"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R3@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "6"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R4@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "5"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "D4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R5@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "4"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "D3"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R6@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "3"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "R1@D2/2==R2@D2/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "2"}},
+                {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R1@D2/6==R6@D2/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "6"}},
+                {"device_id": {"device_uuid": {"uuid": "R6@D2"}}, "endpoint_uuid": {"uuid": "1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R1@D2/5==R5@D2/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "5"}},
+                {"device_id": {"device_uuid": {"uuid": "R5@D2"}}, "endpoint_uuid": {"uuid": "1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R2@D2/3==R3@D2/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "3"}},
+                {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R2@D2/4==R4@D2/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "4"}},
+                {"device_id": {"device_uuid": {"uuid": "R4@D2"}}, "endpoint_uuid": {"uuid": "2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R2@D2/5==R5@D2/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "5"}},
+                {"device_id": {"device_uuid": {"uuid": "R5@D2"}}, "endpoint_uuid": {"uuid": "2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R2@D2/6==R6@D2/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "6"}},
+                {"device_id": {"device_uuid": {"uuid": "R6@D2"}}, "endpoint_uuid": {"uuid": "2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R3@D2/6==R6@D2/3"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "6"}},
+                {"device_id": {"device_uuid": {"uuid": "R6@D2"}}, "endpoint_uuid": {"uuid": "3"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R4@D2/5==R5@D2/4"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R4@D2"}}, "endpoint_uuid": {"uuid": "5"}},
+                {"device_id": {"device_uuid": {"uuid": "R5@D2"}}, "endpoint_uuid": {"uuid": "4"}}
+            ]
+        }
+    ]
+}
diff --git a/src/tests/scenario2/descriptors/domain3.json b/src/tests/scenario2/descriptors/domain3.json
new file mode 100644
index 0000000000000000000000000000000000000000..3a8e47d30dcef471b388f46d4ba5df5df4716256
--- /dev/null
+++ b/src/tests/scenario2/descriptors/domain3.json
@@ -0,0 +1,110 @@
+{
+    "contexts": [
+        {
+            "context_id": {"context_uuid": {"uuid": "admin"}},
+            "topology_ids": [
+                {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D3"}}
+            ], "service_ids": []
+        }
+    ],
+    "topologies": [
+        {
+            "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D3"}},
+            "device_ids": [
+                {"device_uuid": {"uuid": "R1@D3"}},
+                {"device_uuid": {"uuid": "R2@D3"}},
+                {"device_uuid": {"uuid": "R3@D3"}},
+                {"device_uuid": {"uuid": "R4@D3"}}
+            ], "link_ids": [
+                {"link_uuid": {"uuid": "R1@D3/2==R2@D3/1"}},
+                {"link_uuid": {"uuid": "R2@D3/3==R3@D3/2"}},
+                {"link_uuid": {"uuid": "R3@D3/4==R4@D3/3"}},
+                {"link_uuid": {"uuid": "R4@D3/1==R1@D3/4"}},
+                {"link_uuid": {"uuid": "R2@D3/4==R4@D3/2"}}
+            ]
+        }
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "R1@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "4"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "D1"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R2@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "3"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "4"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "D2"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R3@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "4"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "D4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R4@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "3"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "R1@D3/2==R2@D3/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R1@D3"}}, "endpoint_uuid": {"uuid": "2"}},
+                {"device_id": {"device_uuid": {"uuid": "R2@D3"}}, "endpoint_uuid": {"uuid": "1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R2@D3/3==R3@D3/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2@D3"}}, "endpoint_uuid": {"uuid": "3"}},
+                {"device_id": {"device_uuid": {"uuid": "R3@D3"}}, "endpoint_uuid": {"uuid": "2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R3@D3/4==R4@D3/3"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R3@D3"}}, "endpoint_uuid": {"uuid": "4"}},
+                {"device_id": {"device_uuid": {"uuid": "R4@D3"}}, "endpoint_uuid": {"uuid": "3"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R4@D3/1==R1@D3/4"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R4@D3"}}, "endpoint_uuid": {"uuid": "1"}},
+                {"device_id": {"device_uuid": {"uuid": "R1@D3"}}, "endpoint_uuid": {"uuid": "4"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R2@D3/4==R4@D3/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2@D3"}}, "endpoint_uuid": {"uuid": "4"}},
+                {"device_id": {"device_uuid": {"uuid": "R4@D3"}}, "endpoint_uuid": {"uuid": "2"}}
+            ]
+        }
+    ]
+}
diff --git a/src/tests/scenario2/descriptors/domain4.json b/src/tests/scenario2/descriptors/domain4.json
new file mode 100644
index 0000000000000000000000000000000000000000..d9e2d049ad2417beb96b8f3434ed9e94febb4808
--- /dev/null
+++ b/src/tests/scenario2/descriptors/domain4.json
@@ -0,0 +1,101 @@
+{
+    "contexts": [
+        {
+            "context_id": {"context_uuid": {"uuid": "admin"}},
+            "topology_ids": [
+                {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D4"}}
+            ], "service_ids": []
+        }
+    ],
+    "topologies": [
+        {
+            "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D4"}},
+            "device_ids": [
+                {"device_uuid": {"uuid": "DC2"}},
+                {"device_uuid": {"uuid": "R1@D4"}},
+                {"device_uuid": {"uuid": "R2@D4"}},
+                {"device_uuid": {"uuid": "R3@D4"}}
+            ], "link_ids": [
+                {"link_uuid": {"uuid": "R3@D4/DC2==DC2/D4"}},
+                {"link_uuid": {"uuid": "R1@D4/2==R2@D4/1"}},
+                {"link_uuid": {"uuid": "R1@D4/3==R3@D4/1"}},
+                {"link_uuid": {"uuid": "R2@D4/3==R3@D4/2"}}
+            ]
+        }
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/border", "uuid": "D4"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R1@D4"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "3"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "D3"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R2@D4"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "3"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "D2"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R3@D4"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2"},
+                    {"sample_types": [], "type": "copper/border", "uuid": "DC2"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "R3@D4/DC2==DC2/D4"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "D4"}},
+                {"device_id": {"device_uuid": {"uuid": "R3@D4"}}, "endpoint_uuid": {"uuid": "DC2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R1@D4/2==R2@D4/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R1@D4"}}, "endpoint_uuid": {"uuid": "2"}},
+                {"device_id": {"device_uuid": {"uuid": "R2@D4"}}, "endpoint_uuid": {"uuid": "1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R1@D4/3==R3@D4/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R1@D4"}}, "endpoint_uuid": {"uuid": "3"}},
+                {"device_id": {"device_uuid": {"uuid": "R3@D4"}}, "endpoint_uuid": {"uuid": "1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R2@D4/3==R3@D4/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2@D4"}}, "endpoint_uuid": {"uuid": "3"}},
+                {"device_id": {"device_uuid": {"uuid": "R3@D4"}}, "endpoint_uuid": {"uuid": "2"}}
+            ]
+        }
+    ]
+}
diff --git a/src/tests/scenario2/descriptors/idc-slice.json b/src/tests/scenario2/descriptors/idc-slice.json
new file mode 100644
index 0000000000000000000000000000000000000000..634209284c00cc8602db2bf91e6088ca120710df
--- /dev/null
+++ b/src/tests/scenario2/descriptors/idc-slice.json
@@ -0,0 +1,20 @@
+{
+    "slices":[
+        {
+            "slice_id":{"context_id":{"context_uuid":{"uuid":"admin"}},"slice_uuid":{"uuid":"idc-slice"}},
+            "slice_endpoint_ids":[
+                {"device_id":{"device_uuid":{"uuid":"DC1"}},"endpoint_uuid":{"uuid":"int"}},
+                {"device_id":{"device_uuid":{"uuid":"DC2"}},"endpoint_uuid":{"uuid":"int"}}
+            ],
+            "slice_status":{"slice_status":1},
+            "slice_service_ids":[],
+            "slice_subslice_ids":[],
+            "slice_constraints":[],
+            "slice_config":{"config_rules":[
+                {"action":1,"custom":{"resource_key":"/settings","resource_value":"{}"}},
+                {"action":1,"custom":{"resource_key":"/device[DC1]/endpoint[int]/settings","resource_value":"{}"}},
+                {"action":1,"custom":{"resource_key":"/device[DC2]/endpoint[int]/settings","resource_value":"{}"}}
+            ]}
+        }
+    ]
+}
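The slice stitches the two data centers together: DC1 is declared in the Domain 1 descriptor and DC2 in the Domain 4 descriptor. A quick cross-check (a sketch, assuming it is run from src/tests/scenario2) that both slice endpoints resolve to endpoints declared in the per-domain descriptors:

    import json

    # Sketch: collect every (device, endpoint) pair declared in the _connect/settings
    # rules of the four domain descriptors, then check the slice endpoints against them.
    endpoints = set()
    for n in range(1, 5):
        with open(f'descriptors/domain{n}.json') as f:
            data = json.load(f)
        for dev in data['devices']:
            name = dev['device_id']['device_uuid']['uuid']
            for rule in dev['device_config']['config_rules']:
                if rule['custom']['resource_key'] == '_connect/settings':
                    for ep in rule['custom']['resource_value']['endpoints']:
                        endpoints.add((name, ep['uuid']))

    with open('descriptors/idc-slice.json') as f:
        slice_desc = json.load(f)['slices'][0]
    for ep in slice_desc['slice_endpoint_ids']:
        pair = (ep['device_id']['device_uuid']['uuid'], ep['endpoint_uuid']['uuid'])
        assert pair in endpoints, f'slice endpoint {pair} not found in any descriptor'
    print('Both slice endpoints resolve to declared device endpoints')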
diff --git a/src/tests/scenario2/dump_logs.sh b/src/tests/scenario2/dump_logs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c2298fd8ef735eab102d463391004a818c874b42
--- /dev/null
+++ b/src/tests/scenario2/dump_logs.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+rm -rf tmp/exec
+
+echo "Collecting logs for MockBlockchain..."
+mkdir -p tmp/exec/mbc
+kubectl --namespace tfs-bchain logs deployments/mock-blockchain server > tmp/exec/mbc/mock-blockchain.log
+printf "\n"
+
+echo "Collecting logs for Domain 1..."
+mkdir -p tmp/exec/dom1
+kubectl --namespace tfs-dom1 logs deployments/contextservice server > tmp/exec/dom1/context.log
+kubectl --namespace tfs-dom1 logs deployments/deviceservice server > tmp/exec/dom1/device.log
+kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/exec/dom1/service.log
+kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/exec/dom1/pathcomp-frontend.log
+kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/exec/dom1/pathcomp-backend.log
+kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/exec/dom1/slice.log
+kubectl --namespace tfs-dom1 logs deployments/interdomainservice server > tmp/exec/dom1/interdomain.log
+kubectl --namespace tfs-dom1 logs deployments/dltservice connector > tmp/exec/dom1/dlt-connector.log
+kubectl --namespace tfs-dom1 logs deployments/dltservice gateway > tmp/exec/dom1/dlt-gateway.log
+printf "\n"
+
+echo "Collecting logs for Domain 2..."
+mkdir -p tmp/exec/dom2
+kubectl --namespace tfs-dom2 logs deployments/contextservice server > tmp/exec/dom2/context.log
+kubectl --namespace tfs-dom2 logs deployments/deviceservice server > tmp/exec/dom2/device.log
+kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/exec/dom2/service.log
+kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/exec/dom2/pathcomp-frontend.log
+kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/exec/dom2/pathcomp-backend.log
+kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/exec/dom2/slice.log
+kubectl --namespace tfs-dom2 logs deployments/interdomainservice server > tmp/exec/dom2/interdomain.log
+kubectl --namespace tfs-dom2 logs deployments/dltservice connector > tmp/exec/dom2/dlt-connector.log
+kubectl --namespace tfs-dom2 logs deployments/dltservice gateway > tmp/exec/dom2/dlt-gateway.log
+printf "\n"
+
+echo "Collecting logs for Domain 3..."
+mkdir -p tmp/exec/dom3
+kubectl --namespace tfs-dom3 logs deployments/contextservice server > tmp/exec/dom3/context.log
+kubectl --namespace tfs-dom3 logs deployments/deviceservice server > tmp/exec/dom3/device.log
+kubectl --namespace tfs-dom3 logs deployments/serviceservice server > tmp/exec/dom3/service.log
+kubectl --namespace tfs-dom3 logs deployments/pathcompservice frontend > tmp/exec/dom3/pathcomp-frontend.log
+kubectl --namespace tfs-dom3 logs deployments/pathcompservice backend > tmp/exec/dom3/pathcomp-backend.log
+kubectl --namespace tfs-dom3 logs deployments/sliceservice server > tmp/exec/dom3/slice.log
+kubectl --namespace tfs-dom3 logs deployments/interdomainservice server > tmp/exec/dom3/interdomain.log
+kubectl --namespace tfs-dom3 logs deployments/dltservice connector > tmp/exec/dom3/dlt-connector.log
+kubectl --namespace tfs-dom3 logs deployments/dltservice gateway > tmp/exec/dom3/dlt-gateway.log
+printf "\n"
+
+echo "Collecting logs for Domain 4..."
+mkdir -p tmp/exec/dom4
+kubectl --namespace tfs-dom4 logs deployments/contextservice server > tmp/exec/dom4/context.log
+kubectl --namespace tfs-dom4 logs deployments/deviceservice server > tmp/exec/dom4/device.log
+kubectl --namespace tfs-dom4 logs deployments/serviceservice server > tmp/exec/dom4/service.log
+kubectl --namespace tfs-dom4 logs deployments/pathcompservice frontend > tmp/exec/dom4/pathcomp-frontend.log
+kubectl --namespace tfs-dom4 logs deployments/pathcompservice backend > tmp/exec/dom4/pathcomp-backend.log
+kubectl --namespace tfs-dom4 logs deployments/sliceservice server > tmp/exec/dom4/slice.log
+kubectl --namespace tfs-dom4 logs deployments/interdomainservice server > tmp/exec/dom4/interdomain.log
+kubectl --namespace tfs-dom4 logs deployments/dltservice connector > tmp/exec/dom4/dlt-connector.log
+kubectl --namespace tfs-dom4 logs deployments/dltservice gateway > tmp/exec/dom4/dlt-gateway.log
+printf "\n"
+
+echo "Done!"
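The per-domain blocks above repeat the same nine kubectl calls. A compact Python equivalent of the per-domain collection (a sketch; it selects the container with the explicit -c flag):

    import pathlib
    import subprocess

    # Sketch: (deployment, container, logfile) tuples mirror the kubectl calls above.
    TARGETS = [
        ('contextservice', 'server', 'context.log'),
        ('deviceservice', 'server', 'device.log'),
        ('serviceservice', 'server', 'service.log'),
        ('pathcompservice', 'frontend', 'pathcomp-frontend.log'),
        ('pathcompservice', 'backend', 'pathcomp-backend.log'),
        ('sliceservice', 'server', 'slice.log'),
        ('interdomainservice', 'server', 'interdomain.log'),
        ('dltservice', 'connector', 'dlt-connector.log'),
        ('dltservice', 'gateway', 'dlt-gateway.log'),
    ]
    for dom in range(1, 5):
        out_dir = pathlib.Path(f'tmp/exec/dom{dom}')
        out_dir.mkdir(parents=True, exist_ok=True)
        for deployment, container, logfile in TARGETS:
            result = subprocess.run(
                ['kubectl', '--namespace', f'tfs-dom{dom}', 'logs',
                 f'deployments/{deployment}', '-c', container],
                capture_output=True, text=True)
            (out_dir / logfile).write_text(result.stdout)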
diff --git a/src/tests/scenario2/fast_redeploy.sh b/src/tests/scenario2/fast_redeploy.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c4880a5afb1e5f40f0848437f51d39447c2c0673
--- /dev/null
+++ b/src/tests/scenario2/fast_redeploy.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+kubectl delete namespace tfs-dom1 tfs-dom2 tfs-dom3 tfs-dom4
+
+echo "Deploying tfs-dom1 ..."
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml                 >  ./tmp/logs/deploy-tfs-dom1.log
+kubectl create namespace tfs-dom1                                             >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml                  >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/contextservice.yaml     >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/deviceservice.yaml      >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/pathcompservice.yaml    >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/serviceservice.yaml     >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/sliceservice.yaml       >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/dltservice.yaml         >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/interdomainservice.yaml >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/webuiservice.yaml       >> ./tmp/logs/deploy-tfs-dom1.log
+kubectl --namespace tfs-dom1 apply -f nfvsdn22/tfs-ingress-dom1.yaml          >> ./tmp/logs/deploy-tfs-dom1.log
+printf "\n"
+
+echo "Deploying tfs-dom2 ..."
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml                 >  ./tmp/logs/deploy-tfs-dom2.log
+kubectl create namespace tfs-dom2                                             >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml                  >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/contextservice.yaml     >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/deviceservice.yaml      >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/pathcompservice.yaml    >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/serviceservice.yaml     >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/sliceservice.yaml       >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/dltservice.yaml         >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/interdomainservice.yaml >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/webuiservice.yaml       >> ./tmp/logs/deploy-tfs-dom2.log
+kubectl --namespace tfs-dom2 apply -f nfvsdn22/tfs-ingress-dom2.yaml          >> ./tmp/logs/deploy-tfs-dom2.log
+printf "\n"
+
+echo "Deploying tfs-dom3 ..."
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml                 >  ./tmp/logs/deploy-tfs-dom3.log
+kubectl create namespace tfs-dom3                                             >> ./tmp/logs/deploy-tfs-dom3.log
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml                  >> ./tmp/logs/deploy-tfs-dom3.log
+kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/contextservice.yaml     >> ./tmp/logs/deploy-tfs-dom3.log
+kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/deviceservice.yaml      >> ./tmp/logs/deploy-tfs-dom3.log
+kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/pathcompservice.yaml    >> ./tmp/logs/deploy-tfs-dom3.log
+kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/serviceservice.yaml     >> ./tmp/logs/deploy-tfs-dom3.log
+kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/sliceservice.yaml       >> ./tmp/logs/deploy-tfs-dom3.log
+kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/dltservice.yaml         >> ./tmp/logs/deploy-tfs-dom3.log
+kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/interdomainservice.yaml >> ./tmp/logs/deploy-tfs-dom3.log
+kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/webuiservice.yaml       >> ./tmp/logs/deploy-tfs-dom3.log
+kubectl --namespace tfs-dom3 apply -f nfvsdn22/tfs-ingress-dom3.yaml          >> ./tmp/logs/deploy-tfs-dom3.log
+printf "\n"
+
+echo "Deploying tfs-dom4 ..."
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml                 >  ./tmp/logs/deploy-tfs-dom4.log
+kubectl create namespace tfs-dom4                                             >> ./tmp/logs/deploy-tfs-dom4.log
+kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml                  >> ./tmp/logs/deploy-tfs-dom4.log
+kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/contextservice.yaml     >> ./tmp/logs/deploy-tfs-dom4.log
+kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/deviceservice.yaml      >> ./tmp/logs/deploy-tfs-dom4.log
+kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/pathcompservice.yaml    >> ./tmp/logs/deploy-tfs-dom4.log
+kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/serviceservice.yaml     >> ./tmp/logs/deploy-tfs-dom4.log
+kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/sliceservice.yaml       >> ./tmp/logs/deploy-tfs-dom4.log
+kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/dltservice.yaml         >> ./tmp/logs/deploy-tfs-dom4.log
+kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/interdomainservice.yaml >> ./tmp/logs/deploy-tfs-dom4.log
+kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/webuiservice.yaml       >> ./tmp/logs/deploy-tfs-dom4.log
+kubectl --namespace tfs-dom4 apply -f nfvsdn22/tfs-ingress-dom4.yaml          >> ./tmp/logs/deploy-tfs-dom4.log
+printf "\n"
+
+echo "Waiting for tfs-dom1 ..."
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/contextservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/deviceservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/pathcompservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/serviceservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/sliceservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/dltservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/interdomainservice
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/webuiservice
+printf "\n"
+
+echo "Waiting for tfs-dom2 ..."
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/contextservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/deviceservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/pathcompservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/serviceservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/sliceservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/dltservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/interdomainservice
+kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/webuiservice
+printf "\n"
+
+echo "Waiting for tfs-dom3 ..."
+kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/contextservice
+kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/deviceservice
+kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/pathcompservice
+kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/serviceservice
+kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/sliceservice
+kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/dltservice
+kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/interdomainservice
+kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/webuiservice
+printf "\n"
+
+echo "Waiting for tfs-dom4 ..."
+kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/contextservice
+kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/deviceservice
+kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/pathcompservice
+kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/serviceservice
+kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/sliceservice
+kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/dltservice
+kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/interdomainservice
+kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/webuiservice
+printf "\n"
+
+echo "Done!"
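The wait blocks are likewise identical across domains; a loop sketch over namespaces and deployments:

    import subprocess

    # Sketch: block until every deployment in each per-domain namespace is available,
    # mirroring the kubectl wait calls above.
    DEPLOYMENTS = ['contextservice', 'deviceservice', 'pathcompservice', 'serviceservice',
                   'sliceservice', 'dltservice', 'interdomainservice', 'webuiservice']
    for dom in range(1, 5):
        for dep in DEPLOYMENTS:
            subprocess.run(
                ['kubectl', 'wait', '--namespace', f'tfs-dom{dom}',
                 '--for=condition=available', '--timeout=300s', f'deployment/{dep}'],
                check=True)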
diff --git a/src/tests/scenario2/nginx-ingress-controller-dom1.yaml b/src/tests/scenario2/nginx-ingress-controller-dom1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1aa1ba48be1bc78e5b0b349dd821e18f80b6953a
--- /dev/null
+++ b/src/tests/scenario2/nginx-ingress-controller-dom1.yaml
@@ -0,0 +1,120 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-microk8s-conf-dom1
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-udp-microk8s-conf-dom1
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-tcp-microk8s-conf-dom1
+  namespace: ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: tfs-ingress-class-dom1
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "false"
+spec:
+  controller: tfs.etsi.org/controller-class-dom1
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nginx-ingress-microk8s-controller-dom1
+  namespace: ingress
+  labels:
+    microk8s-application: nginx-ingress-microk8s-dom1
+spec:
+  selector:
+    matchLabels:
+      name: nginx-ingress-microk8s-dom1
+  updateStrategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: nginx-ingress-microk8s-dom1
+    spec:
+      terminationGracePeriodSeconds: 60
+      restartPolicy: Always
+      serviceAccountName: nginx-ingress-microk8s-serviceaccount
+      containers:
+      - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0
+        imagePullPolicy: IfNotPresent
+        name: nginx-ingress-microk8s
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        lifecycle:
+          preStop:
+            exec:
+              command:
+                - /wait-shutdown
+        securityContext:
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - ALL
+          runAsUser: 101 # www-data
+        env:
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.namespace
+        ports:
+        - name: http
+          containerPort: 80
+          hostPort: 8001
+          protocol: TCP
+        - name: https
+          containerPort: 443
+          hostPort: 4431
+          protocol: TCP
+        - name: health
+          containerPort: 10254
+          hostPort: 12541
+          protocol: TCP
+        args:
+        - /nginx-ingress-controller
+        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom1
+        - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom1
+        - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom1
+        - --election-id=ingress-controller-leader-dom1
+        - --controller-class=tfs.etsi.org/controller-class-dom1
+        - --ingress-class=tfs-ingress-class-dom1
+        - ' '
+        - --publish-status-address=127.0.0.1
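The dom2-dom4 manifests below repeat this structure with only the domN suffix and the host ports changed (HTTP 800N, HTTPS 443N, health 1254N), so the four controllers can coexist on one MicroK8s node. A small check (a sketch; assumes PyYAML is installed and the four manifests are in the current directory) that the manifests do not collide on hostPort:

    import yaml  # assumption: PyYAML is available

    # Sketch: gather the hostPorts declared by each per-domain DaemonSet and make
    # sure no two controller manifests claim the same port on the node.
    seen = {}
    for n in range(1, 5):
        path = f'nginx-ingress-controller-dom{n}.yaml'
        with open(path) as f:
            for doc in yaml.safe_load_all(f):
                if doc and doc.get('kind') == 'DaemonSet':
                    container = doc['spec']['template']['spec']['containers'][0]
                    for port in container['ports']:
                        host_port = port['hostPort']
                        assert host_port not in seen, f'{path}: hostPort {host_port} also in {seen[host_port]}'
                        seen[host_port] = path
    print('hostPorts in use:', sorted(seen))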
diff --git a/src/tests/scenario2/nginx-ingress-controller-dom2.yaml b/src/tests/scenario2/nginx-ingress-controller-dom2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2dac1ecd26a5fd1c679b8e92ae28b51797987b71
--- /dev/null
+++ b/src/tests/scenario2/nginx-ingress-controller-dom2.yaml
@@ -0,0 +1,120 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-microk8s-conf-dom2
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-udp-microk8s-conf-dom2
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-tcp-microk8s-conf-dom2
+  namespace: ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: tfs-ingress-class-dom2
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "false"
+spec:
+  controller: tfs.etsi.org/controller-class-dom2
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nginx-ingress-microk8s-controller-dom2
+  namespace: ingress
+  labels:
+    microk8s-application: nginx-ingress-microk8s-dom2
+spec:
+  selector:
+    matchLabels:
+      name: nginx-ingress-microk8s-dom2
+  updateStrategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: nginx-ingress-microk8s-dom2
+    spec:
+      terminationGracePeriodSeconds: 60
+      restartPolicy: Always
+      serviceAccountName: nginx-ingress-microk8s-serviceaccount
+      containers:
+      - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0
+        imagePullPolicy: IfNotPresent
+        name: nginx-ingress-microk8s
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        lifecycle:
+          preStop:
+            exec:
+              command:
+                - /wait-shutdown
+        securityContext:
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - ALL
+          runAsUser: 101 # www-data
+        env:
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.namespace
+        ports:
+        - name: http
+          containerPort: 80
+          hostPort: 8002
+          protocol: TCP
+        - name: https
+          containerPort: 443
+          hostPort: 4432
+          protocol: TCP
+        - name: health
+          containerPort: 10254
+          hostPort: 12542
+          protocol: TCP
+        args:
+        - /nginx-ingress-controller
+        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom2
+        - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom2
+        - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom2
+        - --election-id=ingress-controller-leader-dom2
+        - --controller-class=tfs.etsi.org/controller-class-dom2
+        - --ingress-class=tfs-ingress-class-dom2
+        - --publish-status-address=127.0.0.1
diff --git a/src/tests/scenario2/nginx-ingress-controller-dom3.yaml b/src/tests/scenario2/nginx-ingress-controller-dom3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..06eb6b75342e2b6340f6868404c82504da8e09ec
--- /dev/null
+++ b/src/tests/scenario2/nginx-ingress-controller-dom3.yaml
@@ -0,0 +1,120 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-microk8s-conf-dom3
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-udp-microk8s-conf-dom3
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-tcp-microk8s-conf-dom3
+  namespace: ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: tfs-ingress-class-dom3
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "false"
+spec:
+  controller: tfs.etsi.org/controller-class-dom3
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nginx-ingress-microk8s-controller-dom3
+  namespace: ingress
+  labels:
+    microk8s-application: nginx-ingress-microk8s-dom3
+spec:
+  selector:
+    matchLabels:
+      name: nginx-ingress-microk8s-dom3
+  updateStrategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: nginx-ingress-microk8s-dom3
+    spec:
+      terminationGracePeriodSeconds: 60
+      restartPolicy: Always
+      serviceAccountName: nginx-ingress-microk8s-serviceaccount
+      containers:
+      - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0
+        imagePullPolicy: IfNotPresent
+        name: nginx-ingress-microk8s
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        lifecycle:
+          preStop:
+            exec:
+              command:
+                - /wait-shutdown
+        securityContext:
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - ALL
+          runAsUser: 101 # www-data
+        env:
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.namespace
+        ports:
+        - name: http
+          containerPort: 80
+          hostPort: 8003
+          protocol: TCP
+        - name: https
+          containerPort: 443
+          hostPort: 4433
+          protocol: TCP
+        - name: health
+          containerPort: 10254
+          hostPort: 12543
+          protocol: TCP
+        args:
+        - /nginx-ingress-controller
+        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom3
+        - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom3
+        - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom3
+        - --election-id=ingress-controller-leader-dom3
+        - --controller-class=tfs.etsi.org/controller-class-dom3
+        - --ingress-class=tfs-ingress-class-dom3
+        - --publish-status-address=127.0.0.1
diff --git a/src/tests/scenario2/nginx-ingress-controller-dom4.yaml b/src/tests/scenario2/nginx-ingress-controller-dom4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c5c2e2f7004cd5ec8b5856b185c4c9de937a7d3f
--- /dev/null
+++ b/src/tests/scenario2/nginx-ingress-controller-dom4.yaml
@@ -0,0 +1,120 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-microk8s-conf-dom4
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-udp-microk8s-conf-dom4
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-tcp-microk8s-conf-dom4
+  namespace: ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: tfs-ingress-class-dom4
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "false"
+spec:
+  controller: tfs.etsi.org/controller-class-dom4
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nginx-ingress-microk8s-controller-dom4
+  namespace: ingress
+  labels:
+    microk8s-application: nginx-ingress-microk8s-dom4
+spec:
+  selector:
+    matchLabels:
+      name: nginx-ingress-microk8s-dom4
+  updateStrategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: nginx-ingress-microk8s-dom4
+    spec:
+      terminationGracePeriodSeconds: 60
+      restartPolicy: Always
+      serviceAccountName: nginx-ingress-microk8s-serviceaccount
+      containers:
+      - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0
+        imagePullPolicy: IfNotPresent
+        name: nginx-ingress-microk8s
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        lifecycle:
+          preStop:
+            exec:
+              command:
+                - /wait-shutdown
+        securityContext:
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - ALL
+          runAsUser: 101 # www-data
+        env:
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.namespace
+        ports:
+        - name: http
+          containerPort: 80
+          hostPort: 8004
+          protocol: TCP
+        - name: https
+          containerPort: 443
+          hostPort: 4434
+          protocol: TCP
+        - name: health
+          containerPort: 10254
+          hostPort: 12544
+          protocol: TCP
+        args:
+        - /nginx-ingress-controller
+        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom4
+        - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom4
+        - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom4
+        - --election-id=ingress-controller-leader-dom4
+        - --controller-class=tfs.etsi.org/controller-class-dom4
+        - --ingress-class=tfs-ingress-class-dom4
+        - --publish-status-address=127.0.0.1
diff --git a/src/tests/scenario2/reset.sh b/src/tests/scenario2/reset.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2bf2cd05559f632b960a5674ea59e334f5123a53
--- /dev/null
+++ b/src/tests/scenario2/reset.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
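+# Restart all TFS components in the tfs-dom1 namespace by scaling their
+# deployments down to zero replicas and back up to one. Note that
+# 'kubectl scale' returns immediately; it does not wait for the pods to
+# terminate or come back up.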
+kubectl --namespace tfs-dom1 scale --replicas=0 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom1 scale --replicas=1 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
diff --git a/src/tests/scenario2/show_deploy.sh b/src/tests/scenario2/show_deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..081b5d3f9430cc3f68b0c1abdf39f0b05eeefae5
--- /dev/null
+++ b/src/tests/scenario2/show_deploy.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-dom1 get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-dom1 get ingress
+printf "\n"
diff --git a/src/tests/scenario2/tfs-ingress-dom1.yaml b/src/tests/scenario2/tfs-ingress-dom1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bf2e40352d5acd85fcf9ee446df1a312a40556d6
--- /dev/null
+++ b/src/tests/scenario2/tfs-ingress-dom1.yaml
@@ -0,0 +1,39 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-dom1
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
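+    # The second capture group of each path regex below becomes the upstream
+    # request path, stripping the /webui, /grafana, or /context prefix.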
+spec:
+  ingressClassName: tfs-ingress-class-dom1
+  rules:
+  - http:
+      paths:
+        - path: /webui(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 8004
+        - path: /grafana(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 3000
+        - path: /context(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: contextservice
+              port:
+                number: 8080
+        - path: /()(restconf/.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: computeservice
+              port:
+                number: 8080
diff --git a/src/tests/scenario2/tfs-ingress-dom2.yaml b/src/tests/scenario2/tfs-ingress-dom2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..40d9480d75dfad817bb1ffe2052a9a71dbb7322d
--- /dev/null
+++ b/src/tests/scenario2/tfs-ingress-dom2.yaml
@@ -0,0 +1,39 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-dom2
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+spec:
+  ingressClassName: tfs-ingress-class-dom2
+  rules:
+  - http:
+      paths:
+        - path: /webui(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 8004
+        - path: /grafana(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 3000
+        - path: /context(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: contextservice
+              port:
+                number: 8080
+        - path: /()(restconf/.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: computeservice
+              port:
+                number: 8080
diff --git a/src/tests/scenario2/tfs-ingress-dom3.yaml b/src/tests/scenario2/tfs-ingress-dom3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..28668b424aa8bd957e12e53583317f336e3b0640
--- /dev/null
+++ b/src/tests/scenario2/tfs-ingress-dom3.yaml
@@ -0,0 +1,39 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-dom3
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+spec:
+  ingressClassName: tfs-ingress-class-dom3
+  rules:
+  - http:
+      paths:
+        - path: /webui(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 8004
+        - path: /grafana(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 3000
+        - path: /context(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: contextservice
+              port:
+                number: 8080
+        - path: /()(restconf/.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: computeservice
+              port:
+                number: 8080
diff --git a/src/tests/scenario2/tfs-ingress-dom4.yaml b/src/tests/scenario2/tfs-ingress-dom4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3774c327ca9ff6d46d538c7a2530a744187b957d
--- /dev/null
+++ b/src/tests/scenario2/tfs-ingress-dom4.yaml
@@ -0,0 +1,39 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-dom4
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+spec:
+  ingressClassName: tfs-ingress-class-dom4
+  rules:
+  - http:
+      paths:
+        - path: /webui(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 8004
+        - path: /grafana(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 3000
+        - path: /context(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: contextservice
+              port:
+                number: 8080
+        - path: /()(restconf/.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: computeservice
+              port:
+                number: 8080
diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py
index 75e1036420d0bc88a790fb7b65f4f4900abaaadd..d60cca6597ced52db8e320f3ba1beb2b032be65b 100644
--- a/src/webui/service/__init__.py
+++ b/src/webui/service/__init__.py
@@ -19,10 +19,10 @@ from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 
 def get_working_context() -> str:
-    if 'context_uuid' in session:
-        return session['context_uuid']
-    else:
-        return 'Not selected'
+    return session['context_uuid'] if 'context_uuid' in session else '---'
+
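+# Like get_working_context above, this helper is registered as a Jinja global
+# (see create_app below) so base.html can display the selected topology.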
+def get_working_topology() -> str:
+    return session['topology_uuid'] if 'topology_uuid' in session else '---'
 
 def liveness():
     pass
@@ -85,6 +85,7 @@ def create_app(use_config=None, web_app_root=None):
     app.jinja_env.filters['from_json'] = from_json
     
     app.jinja_env.globals.update(get_working_context=get_working_context)
+    app.jinja_env.globals.update(get_working_topology=get_working_topology)
 
     if web_app_root is not None:
         app.wsgi_app = SetSubAppMiddleware(app.wsgi_app, web_app_root)
diff --git a/src/webui/service/__main__.py b/src/webui/service/__main__.py
index c194be4bcfe71f3665dba75a109aa5fdf9646a8d..ddbda9c511eac4554c168128b3318b3107d892d7 100644
--- a/src/webui/service/__main__.py
+++ b/src/webui/service/__main__.py
@@ -12,15 +12,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os, sys, logging
+import hashlib, sys, logging
 from prometheus_client import start_http_server
 from common.Constants import ServiceNameEnum
 from common.Settings import (
-    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, get_service_baseurl_http,
-    get_service_port_http, get_setting, wait_for_environment_variables)
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+    get_service_baseurl_http, get_service_port_http, get_setting, wait_for_environment_variables)
 from webui.service import create_app
 from webui.Config import MAX_CONTENT_LENGTH, HOST, SECRET_KEY, DEBUG
 
+def create_unique_session_cookie_name() -> str:
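+    # Derive a per-instance session cookie name from the pod's HOSTNAME so
+    # that, when several TFS WebUI instances (e.g., the four domains of this
+    # scenario) are accessed from the same browser, one instance does not
+    # overwrite the others' 'session' cookies.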
+    hostname = get_setting('HOSTNAME')
+    if hostname is None: return 'session'
+    hasher = hashlib.blake2b(digest_size=8)
+    hasher.update(hostname.encode('UTF-8'))
+    return 'session:{:s}'.format(hasher.hexdigest())
+
 def main():
     log_level = get_log_level()
     logging.basicConfig(level=log_level)
@@ -33,6 +40,8 @@ def main():
         get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_PORT_GRPC),
         get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_HOST     ),
         get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
     logger.info('Starting...')
@@ -49,6 +58,7 @@ def main():
     app = create_app(use_config={
         'SECRET_KEY': SECRET_KEY,
         'MAX_CONTENT_LENGTH': MAX_CONTENT_LENGTH,
+        'SESSION_COOKIE_NAME': create_unique_session_cookie_name(),
     }, web_app_root=web_app_root)
     app.run(host=host, port=service_port, debug=debug)
 
diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py
index f1423e92ed63fa778448978167c1c8e646414885..b57c5735d4b26c541d60a885512fe37a2fd626bc 100644
--- a/src/webui/service/device/routes.py
+++ b/src/webui/service/device/routes.py
@@ -16,7 +16,9 @@ from flask import current_app, render_template, Blueprint, flash, session, redir
 from common.proto.context_pb2 import (
     ConfigActionEnum, ConfigRule,
     Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum,
-    Empty)
+    Empty, TopologyId)
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from webui.service.device.forms import AddDeviceForm
@@ -27,16 +29,28 @@ device_client = DeviceClient()
 
 @device.get('/')
 def home():
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
+
+    context_uuid = session['context_uuid']
+    topology_uuid = session['topology_uuid']
+
     context_client.connect()
-    response: DeviceList = context_client.ListDevices(Empty())
+    json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))
+    grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id))
+    topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids}
+    grpc_devices: DeviceList = context_client.ListDevices(Empty())
     context_client.close()
-    return render_template('device/home.html', devices=response.devices,
-                                               dde=DeviceDriverEnum,
-                                               dose=DeviceOperationalStatusEnum)
+
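+    # Keep only the devices that belong to the selected topology.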
+    devices = [
+        device for device in grpc_devices.devices
+        if device.device_id.device_uuid.uuid in topo_device_uuids
+    ]
+
+    return render_template(
+        'device/home.html', devices=devices, dde=DeviceDriverEnum,
+        dose=DeviceOperationalStatusEnum)
 
 @device.route('add', methods=['GET', 'POST'])
 def add():
diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py
index 51e903d9ec28c5aaac20cd49e2f97dd7044e12bf..5b8831b7732443830a6f9b1ef8f7da92b4c41cc0 100644
--- a/src/webui/service/link/routes.py
+++ b/src/webui/service/link/routes.py
@@ -14,7 +14,9 @@
 
 
 from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for
-from common.proto.context_pb2 import Empty, Link, LinkEvent, LinkId, LinkIdList, LinkList, DeviceId
+from common.proto.context_pb2 import Empty, Link, LinkEvent, LinkId, LinkIdList, LinkList, DeviceId, TopologyId
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 
 
@@ -23,18 +25,28 @@ context_client = ContextClient()
 
 @link.get('/')
 def home():
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
-    request = Empty()
+
+    context_uuid = session['context_uuid']
+    topology_uuid = session['topology_uuid']
+
     context_client.connect()
-    response = context_client.ListLinks(request)
+    json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))
+    grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id))
+    topo_link_uuids = {link_id.link_uuid.uuid for link_id in grpc_topology.link_ids}
+    grpc_links: LinkList = context_client.ListLinks(Empty())
     context_client.close()
+
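+    # Keep only the links that belong to the selected topology.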
+    links = [
+        link for link in grpc_links.links
+        if link.link_id.link_uuid.uuid in topo_link_uuids
+    ]
+
     return render_template(
-        "link/home.html",
-        links=response.links,
-    )
+        'link/home.html', links=links)
+
 
 @link.route('detail/<path:link_uuid>', methods=('GET', 'POST'))
 def detail(link_uuid: str):
diff --git a/src/webui/service/main/forms.py b/src/webui/service/main/forms.py
index abef11e06d6222c6bbab527f3a41ccdc5918480f..b138592fccd3f65831673912d04aba79f2dd3c72 100644
--- a/src/webui/service/main/forms.py
+++ b/src/webui/service/main/forms.py
@@ -19,20 +19,21 @@ from wtforms import SelectField, FileField, SubmitField
 from wtforms.validators import DataRequired, Length
 
 
-class ContextForm(FlaskForm):
-    context = SelectField(  'Context',
-                            choices=[],
-                            validators=[
-                                DataRequired(),
-                                Length(min=1)
-                            ])
-    
+class ContextTopologyForm(FlaskForm):
+    context_topology = SelectField(
+        'Ctx/Topo',
+        choices=[],
+        validators=[
+            DataRequired(),
+            Length(min=1)
+        ])
     submit = SubmitField('Submit')
 
 
 class DescriptorForm(FlaskForm):
-    descriptors = FileField('Descriptors',
-                            validators=[
-                                FileAllowed(['json'], 'JSON Descriptors only!')
-                            ])
+    descriptors = FileField(
+        'Descriptors',
+        validators=[
+            FileAllowed(['json'], 'JSON Descriptors only!')
+        ])
     submit = SubmitField('Submit')
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index 9b1b088579c5b01218316bf1c96b5208ff854609..979d0664bc42221e3559eef498bd53562fe073e7 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -12,10 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging
+import json, logging, re
 from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
-from common.proto.context_pb2 import Connection, Context, Device, Empty, Link, Service, Slice, Topology, ContextIdList
+from common.proto.context_pb2 import (
+    Connection, Context, Device, Empty, Link, Service, Slice, Topology, ContextIdList, TopologyId, TopologyIdList)
 from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
@@ -23,7 +26,7 @@ from slice.client.SliceClient import SliceClient
 from webui.service.main.DescriptorTools import (
     format_custom_config_rules, get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices,
     get_descriptors_add_topologies, split_devices_by_rules)
-from webui.service.main.forms import ContextForm, DescriptorForm
+from webui.service.main.forms import ContextTopologyForm, DescriptorForm
 
 main = Blueprint('main', __name__)
 
@@ -154,20 +157,34 @@ def process_descriptors(descriptors):
 def home():
     context_client.connect()
     device_client.connect()
-    response: ContextIdList = context_client.ListContextIds(Empty())
-    context_form: ContextForm = ContextForm()
-    context_form.context.choices.append(('', 'Select...'))
+    context_topology_form: ContextTopologyForm = ContextTopologyForm()
+    context_topology_form.context_topology.choices.append(('', 'Select...'))
 
-    for context in response.context_ids:
-        context_form.context.choices.append((context.context_uuid.uuid, context.context_uuid))
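+    # Build one selector entry per (context, topology) pair.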
+    ctx_response: ContextIdList = context_client.ListContextIds(Empty())
+    for context_id in ctx_response.context_ids:
+        context_uuid = context_id.context_uuid.uuid
+        topo_response: TopologyIdList = context_client.ListTopologyIds(context_id)
+        for topology_id in topo_response.topology_ids:
+            topology_uuid = topology_id.topology_uuid.uuid
+            context_topology_uuid  = 'ctx[{:s}]/topo[{:s}]'.format(context_uuid, topology_uuid)
+            context_topology_name  = 'Context({:s}):Topology({:s})'.format(context_uuid, topology_uuid)
+            context_topology_entry = (context_topology_uuid, context_topology_name)
+            context_topology_form.context_topology.choices.append(context_topology_entry)
 
-    if context_form.validate_on_submit():
-        session['context_uuid'] = context_form.context.data
-        flash(f'The context was successfully set to `{context_form.context.data}`.', 'success')
-        return redirect(url_for("main.home"))
+    if context_topology_form.validate_on_submit():
+        context_topology_uuid = context_topology_form.context_topology.data
+        if len(context_topology_uuid) > 0:
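+            # The selector value encodes both UUIDs as 'ctx[<context_uuid>]/topo[<topology_uuid>]'.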
+            match = re.match(r'ctx\[([^\]]+)\]/topo\[([^\]]+)\]', context_topology_uuid)
+            if match is not None:
+                session['context_topology_uuid'] = context_topology_uuid = match.group(0)
+                session['context_uuid'] = context_uuid = match.group(1)
+                session['topology_uuid'] = topology_uuid = match.group(2)
+                MSG = f'Context({context_uuid})/Topology({topology_uuid}) successfully selected.'
+                flash(MSG, 'success')
+                return redirect(url_for("main.home"))
 
-    if 'context_uuid' in session:
-        context_form.context.data = session['context_uuid']
+    if 'context_topology_uuid' in session:
+        context_topology_form.context_topology.data = session['context_topology_uuid']
 
     descriptor_form: DescriptorForm = DescriptorForm()
     try:
@@ -181,22 +198,39 @@ def home():
         context_client.close()
         device_client.close()
 
-    return render_template('main/home.html', context_form=context_form, descriptor_form=descriptor_form)
+    return render_template(
+        'main/home.html', context_topology_form=context_topology_form, descriptor_form=descriptor_form)
 
 @main.route('/topology', methods=['GET'])
 def topology():
     context_client.connect()
     try:
+        if 'context_topology_uuid' not in session:
+            return jsonify({'devices': [], 'links': []})
+
+        context_uuid = session['context_uuid']
+        topology_uuid = session['topology_uuid']
+
+        json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))
+        grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id))
+
+        topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids}
+        topo_link_uuids   = {link_id  .link_uuid  .uuid for link_id   in grpc_topology.link_ids  }
+
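+        # Filter the global device and link listings down to the selected topology.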
         response = context_client.ListDevices(Empty())
-        devices = [{
-            'id': device.device_id.device_uuid.uuid,
-            'name': device.device_id.device_uuid.uuid,
-            'type': device.device_type,
-        } for device in response.devices]
+        devices = []
+        for device in response.devices:
+            if device.device_id.device_uuid.uuid not in topo_device_uuids: continue
+            devices.append({
+                'id': device.device_id.device_uuid.uuid,
+                'name': device.device_id.device_uuid.uuid,
+                'type': device.device_type,
+            })
 
         response = context_client.ListLinks(Empty())
         links = []
         for link in response.links:
+            if link.link_id.link_uuid.uuid not in topo_link_uuids: continue
             if len(link.link_endpoint_ids) != 2:
                 str_link = grpc_message_to_json_string(link)
                 logger.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link))
diff --git a/src/webui/service/static/topology_icons/Acknowledgements.txt b/src/webui/service/static/topology_icons/Acknowledgements.txt
index df5d16dc71d306893818ddfc7f7232fd203c7bcb..b285d225957b0a4e8c14ac4ae5e078597d2a1b27 100644
--- a/src/webui/service/static/topology_icons/Acknowledgements.txt
+++ b/src/webui/service/static/topology_icons/Acknowledgements.txt
@@ -1,6 +1,7 @@
 Network Topology Icons taken from https://vecta.io/symbols
 
-https://symbols.getvecta.com/stencil_240/51_cloud.4d0a827676.png => cloud.png
+https://symbols.getvecta.com/stencil_240/51_cloud.4d0a827676.png => network.png
+    # modified to be grey instead of white
 
 https://symbols.getvecta.com/stencil_240/15_atm-switch.1bbf9a7cca.png => packet-switch.png
 https://symbols.getvecta.com/stencil_241/45_atm-switch.6a7362c1df.png => emu-packet-switch.png
diff --git a/src/webui/service/static/topology_icons/cloud.png b/src/webui/service/static/topology_icons/cloud.png
deleted file mode 100644
index 0f8e9c9714edd1c11904367ef1e9c60ef7ed3295..0000000000000000000000000000000000000000
Binary files a/src/webui/service/static/topology_icons/cloud.png and /dev/null differ
diff --git a/src/webui/service/static/topology_icons/network.png b/src/webui/service/static/topology_icons/network.png
new file mode 100644
index 0000000000000000000000000000000000000000..1f770f7bb2a31834a191e6c8727f059e1f14bbe1
Binary files /dev/null and b/src/webui/service/static/topology_icons/network.png differ
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index 5d7801d11880e89869120985307c6b43416f5a05..bee98ee82da3482caf1fad930d03d30572ba287d 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -103,7 +103,7 @@
                 </li>
               </ul>
               <span class="navbar-text" style="color: #fff;">
-                Current context: <b>{{ get_working_context() }}</b>
+                Current Context(<b>{{ get_working_context() }}</b>)/Topology(<b>{{ get_working_topology() }}</b>)
               </span>
             </div>
           </div>
diff --git a/src/webui/service/templates/main/home.html b/src/webui/service/templates/main/home.html
index db390939ff926b5bbfbfc6507b0f4e79695f3693..43b066cc0227801672fc25780f27e3a699338632 100644
--- a/src/webui/service/templates/main/home.html
+++ b/src/webui/service/templates/main/home.html
@@ -19,7 +19,7 @@
 {% block content %}
     <h2>ETSI TeraFlowSDN Controller</h2>
 
-    {% for field, message in context_form.errors.items() %}
+    {% for field, message in context_topology_form.errors.items() %}
         <div class="alert alert-dismissible fade show" role="alert">
         <b>{{ field }}</b>: {{ message }}
         <button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button>
@@ -28,32 +28,32 @@
     {% endfor %}
 
     <form id="select_context" method="POST" enctype="multipart/form-data">
-        {{ context_form.hidden_tag() }}
+        {{ context_topology_form.hidden_tag() }}
         <fieldset class="form-group">
-            <legend>Select the working context, or upload a JSON descriptors file</legend>
+            <legend>Select the desired Context/Topology</legend>
             <div class="row mb-3">
-                {{ context_form.context.label(class="col-sm-1 col-form-label") }}
+                {{ context_topology_form.context_topology.label(class="col-sm-1 col-form-label") }}
                 <div class="col-sm-5">
-                    {% if context_form.context.errors %}
-                        {{ context_form.context(class="form-select is-invalid") }}
+                    {% if context_topology_form.context_topology.errors %}
+                        {{ context_topology_form.context_topology(class="form-select is-invalid") }}
                         <div class="invalid-feedback">
-                            {% for error in context_form.context.errors %}
+                            {% for error in context_topology_form.context_topology.errors %}
                                 <span>{{ error }}</span>
                             {% endfor %}
                         </div>
                     {% else %}
-                        {{ context_form.context(class="form-select") }}
+                        {{ context_topology_form.context_topology(class="form-select") }}
                     {% endif %}
                 </div>
                 <div class="col-sm-2">
-                    {{ context_form.submit(class='btn btn-primary') }}
+                    {{ context_topology_form.submit(class='btn btn-primary') }}
                 </div>
             </div>
         </fieldset>
     </form>
 
-    <form id="select_context" method="POST" enctype="multipart/form-data">
-        {{ context_form.hidden_tag() }}
+    <form id="upload_descriptors" method="POST" enctype="multipart/form-data">
+        {{ descriptor_form.hidden_tag() }}
         <fieldset class="form-group">
             <legend>Upload a JSON descriptors file</legend>
             <div class="row mb-3">
diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html
index 975369ca21d294900c83537916bf527dce4810e9..e1f963e425e23216281068b82da23c809a677296 100644
--- a/src/webui/service/templates/service/detail.html
+++ b/src/webui/service/templates/service/detail.html
@@ -43,6 +43,7 @@
 
 <div class="row mb-3">
     <div class="col-sm-4">
+        <b>Context: </b> {{ service.service_id.context_id.context_uuid.uuid }}<br><br>
         <b>UUID: </b> {{ service.service_id.service_uuid.uuid }}<br><br>
         <b>Type: </b> {{ ste.Name(service.service_type).replace('SERVICETYPE_', '') }}<br><br>
         <b>Status: </b> {{ sse.Name(service.service_status.service_status).replace('SERVICESTATUS_', '') }}<br><br>
@@ -209,13 +210,17 @@
                 <ul>
                 {% for sub_service_id in connection.sub_service_ids %}
                     <li>
+                        {% if sub_service_id.context_id.context_uuid.uuid == session['context_uuid'] %}
                         <a href="{{ url_for('service.detail', service_uuid=sub_service_id.service_uuid.uuid) }}">
-                            {{ sub_service_id.service_uuid.uuid }}
+                            {{ sub_service_id.context_id.context_uuid.uuid }} / {{ sub_service_id.service_uuid.uuid }}
                             <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
                                 <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
                                 <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
                             </svg>
                         </a>
+                        {% else %}
+                            {{ sub_service_id.context_id.context_uuid.uuid }} / {{ sub_service_id.service_uuid.uuid }}
+                        {% endif %}
                     </li>
                 {% endfor %}
                 </ul>
diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html
index 07734f32304b60365f76413d4689a37b66cc60a3..889e10ce53b4a019b55f714c2442f32f0c2b8e93 100644
--- a/src/webui/service/templates/slice/detail.html
+++ b/src/webui/service/templates/slice/detail.html
@@ -44,7 +44,9 @@
 
 <div class="row mb-3">
     <div class="col-sm-4">
+        <b>Context: </b> {{ slice.slice_id.context_id.context_uuid.uuid }}<br><br>
         <b>UUID: </b> {{ slice.slice_id.slice_uuid.uuid }}<br><br>
+        <b>Owner: </b> {{ slice.slice_owner.owner_uuid.uuid }}<br><br>
         <b>Status: </b> {{ sse.Name(slice.slice_status.slice_status).replace('SLICESTATUS_', '') }}<br><br>
     </div>
     <div class="col-sm-8">
@@ -180,13 +182,17 @@
                 {% for service_id in slice.slice_service_ids %}
                 <tr>
                     <td>
+                        {% if service_id.context_id.context_uuid.uuid == session['context_uuid'] %}
                         <a href="{{ url_for('service.detail', service_uuid=service_id.service_uuid.uuid) }}">
-                            {{ service_id.service_uuid.uuid }}
+                            {{ service_id.context_id.context_uuid.uuid }} / {{ service_id.service_uuid.uuid }}
                             <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
                                 <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
                                 <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
                             </svg>
                         </a>
+                        {% else %}
+                            {{ service_id.context_id.context_uuid.uuid }} / {{ service_id.service_uuid.uuid }}
+                        {% endif %}
                     </td>
                 </tr>
                 {% endfor %}
@@ -204,13 +210,17 @@
                 {% for subslice_id in slice.slice_subslice_ids %}
                 <tr>
                     <td>
+                        {% if subslice_id.context_id.context_uuid.uuid == session['context_uuid'] %}
                         <a href="{{ url_for('slice.detail', slice_uuid=subslice_id.slice_uuid.uuid) }}">
-                            {{ subslice_id.slice_uuid.uuid }}
+                            {{ subslice_id.context_id.context_uuid.uuid }} / {{ subslice_id.slice_uuid.uuid }}
                             <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
                                 <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
                                 <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
                             </svg>
                         </a>
+                        {% else %}
+                            {{ subslice_id.context_id.context_uuid.uuid }} / {{ subslice_id.slice_uuid.uuid }}
+                        {% endif %}
                     </td>
                 </tr>
                 {% endfor %}