diff --git a/deploy/all.sh b/deploy/all.sh
index 6f8331b769b6f84a13ac66b48ca2f861a8308ce5..9584dd32d121b7f63e7c7f177bf7bee8c287b4c9 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -147,6 +147,15 @@ export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
 
 
+# ----- K8s Observability ------------------------------------------------------
+
+# If not already set, set the external port the Prometheus Mgmt HTTP GUI will be exposed on.
+export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
+
+# If not already set, set the external port the Grafana HTTP Dashboards will be exposed on.
+export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
+
+
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
@@ -160,6 +169,9 @@ export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
 # Deploy QuestDB
 ./deploy/qdb.sh
 
+# Expose Observability Dashboards (Prometheus and Grafana)
+./deploy/expose_dashboard.sh
+
 # Deploy TeraFlowSDN
 ./deploy/tfs.sh
 
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index 216339117d2156d0ae1beddb5a1d6a7ccbe33219..414de523d10f7d1edb99799e1f5889b340d8ad04 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -167,6 +167,11 @@ function crdb_drop_database_single() {
 }
 
 function crdb_deploy_cluster() {
+    echo "CockroachDB Operator Namespace"
+    echo ">>> Create CockroachDB Operator Namespace (if missing)"
+    kubectl apply -f "${CRDB_MANIFESTS_PATH}/pre_operator.yaml"
+    echo
+
     echo "Cockroach Operator CRDs"
     echo ">>> Apply Cockroach Operator CRDs (if they are missing)"
     cp "${CRDB_MANIFESTS_PATH}/crds.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_crds.yaml"
diff --git a/deploy/expose_dashboard.sh b/deploy/expose_dashboard.sh
new file mode 100755
index 0000000000000000000000000000000000000000..60b41c7b75d4f96a22151b1d4d68ba53c75a265c
--- /dev/null
+++ b/deploy/expose_dashboard.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the external port the Prometheus Mgmt HTTP GUI will be exposed on.
+export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
+
+# If not already set, set the external port the Grafana HTTP Dashboards will be exposed on.
+export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
+
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+function expose_dashboard() {
+    echo "Prometheus Port Mapping"
+    echo ">>> Expose Prometheus HTTP Mgmt GUI port (9090->${PROM_EXT_PORT_HTTP})"
+    PROM_PORT_HTTP=$(kubectl --namespace monitoring get service prometheus-k8s -o 'jsonpath={.spec.ports[?(@.name=="web")].port}')
+    PATCH='{"data": {"'${PROM_EXT_PORT_HTTP}'": "monitoring/prometheus-k8s:'${PROM_PORT_HTTP}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${PROM_EXT_PORT_HTTP}', "hostPort": '${PROM_EXT_PORT_HTTP}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+
+    echo "Grafana Port Mapping"
+    echo ">>> Expose Grafana HTTP Mgmt GUI port (3000->${GRAF_EXT_PORT_HTTP})"
+    GRAF_PORT_HTTP=$(kubectl --namespace monitoring get service grafana -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+    PATCH='{"data": {"'${GRAF_EXT_PORT_HTTP}'": "monitoring/grafana:'${GRAF_PORT_HTTP}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${GRAF_EXT_PORT_HTTP}', "hostPort": '${GRAF_EXT_PORT_HTTP}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+}
+
+expose_dashboard
diff --git a/deploy/nats.sh b/deploy/nats.sh
index aa082b54ba8806c48f9b5a04c61f110b93b03d6a..b730cec4af66920e5a7d8a2235e63beff70e8694 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -53,7 +53,7 @@ function nats_deploy_single() {
         echo ">>> NATS is present; skipping step."
     else
         echo ">>> Deploy NATS"
-        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image.tag=2.9-alpine
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
 
         echo ">>> Waiting NATS statefulset to be created..."
         while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 4c6dc95d2e20dd92c73692aefd46c6fe4b348601..be83d7f5b2669abe8330adefa8a8feac27a1dab8 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -106,6 +106,15 @@ export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kp
 export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 
 
+# ----- K8s Observability ------------------------------------------------------
+
+# If not already set, set the external port the Prometheus Mgmt HTTP GUI will be exposed on.
+export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
+
+# If not already set, set the external port the Grafana HTTP Dashboards will be exposed on.
+export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
+
+
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
@@ -241,7 +250,8 @@ for COMPONENT in $TFS_COMPONENTS; do
 
     echo "  Adapting '$COMPONENT' manifest file..."
     MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
-    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+    # cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
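+    # Inject the Linkerd sidecar proxy into the component manifest, bounding the proxy CPU/memory
+    # so that meshed pods remain schedulable on resource-constrained clusters.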
+    linkerd inject ./manifests/"${COMPONENT}"service.yaml --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST"
 
     if [ "$COMPONENT" == "pathcomp" ]; then
         IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
@@ -335,7 +345,7 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
     # Exposed through the ingress controller "tfs-ingress"
     GRAFANA_URL="127.0.0.1:${EXT_HTTP_PORT}/grafana"
 
-    # Default Grafana credentials
+    # Default Grafana credentials when installed with the `monitoring` addon
     GRAFANA_USERNAME="admin"
     GRAFANA_PASSWORD="admin"
 
@@ -414,6 +424,20 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
     }' ${GRAFANA_URL_UPDATED}/api/datasources
     printf "\n\n"
 
+    # Add the datasource of the metrics-collection framework (Prometheus)
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "prometheus",
+        "name"     : "Prometheus",
+        "url"      : "http://prometheus-k8s.monitoring.svc:9090",
+        "basicAuth": false,
+        "isDefault": false,
+        "jsonData" : {
+            "httpMethod"               : "POST"
+        }
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    printf "\n\n"
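+    # (Optional) A GET on ${GRAFANA_URL_UPDATED}/api/datasources can be used to verify
+    # that the Prometheus datasource was registered.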
+
     echo ">> Creating dashboards..."
     # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
     curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \
diff --git a/manifests/automationservice.yaml b/manifests/automationservice.yaml
deleted file mode 120000
index 5e8d3c1c82db0c03119f29865e2a7edabcdfb0eb..0000000000000000000000000000000000000000
--- a/manifests/automationservice.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../src/automation/target/kubernetes/kubernetes.yml
\ No newline at end of file
diff --git a/manifests/automationservice.yaml b/manifests/automationservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..73e6b1d7be076dbcf55014ae3accbc1e29e0c8e8
--- /dev/null
+++ b/manifests/automationservice.yaml
@@ -0,0 +1,125 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+  labels:
+    app.kubernetes.io/name: automationservice
+    app: automationservice
+  name: automationservice
+spec:
+  ports:
+    - name: grpc
+      port: 5050
+      targetPort: 5050
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 8080
+  selector:
+    app.kubernetes.io/name: automationservice
+  type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+  labels:
+    app: automationservice
+    app.kubernetes.io/name: automationservice
+  name: automationservice
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: automationservice
+  template:
+    metadata:
+      annotations:
+        app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+      labels:
+        app: automationservice
+        app.kubernetes.io/name: automationservice
+    spec:
+      containers:
+        - env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: CONTEXT_SERVICE_HOST
+              value: contextservice
+            - name: DEVICE_SERVICE_HOST
+              value: deviceservice
+          image: labs.etsi.org:5050/tfs/controller/automation:0.2.0
+          imagePullPolicy: Always
+          livenessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/live
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          name: automationservice
+          ports:
+            - containerPort: 5050
+              name: grpc
+              protocol: TCP
+            - containerPort: 8080
+              name: metrics
+              protocol: TCP
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/ready
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          resources:
+            requests:
+              cpu: 50m
+              memory: 512Mi
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: automationservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: automationservice
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
diff --git a/manifests/cockroachdb/pre_operator.yaml b/manifests/cockroachdb/pre_operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16718a77918491170502a5cbb864a6fda39c734a
--- /dev/null
+++ b/manifests/cockroachdb/pre_operator.yaml
@@ -0,0 +1,19 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: cockroach-operator
+  name: cockroach-operator-system
diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml
index b1e6eb89dc4ec92409dbd05bbe668987ea93828f..96735bf5f89f682f31131c123ee9884a1becbfdb 100644
--- a/manifests/contextservice.yaml
+++ b/manifests/contextservice.yaml
@@ -20,9 +20,11 @@ spec:
   selector:
     matchLabels:
       app: contextservice
-  replicas: 1
+  #replicas: 1
   template:
     metadata:
+      annotations:
+        config.linkerd.io/skip-outbound-ports: "4222"
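+        # Port 4222 is the NATS client port; excluding it from the Linkerd proxy lets the
+        # Context component reach the message broker directly.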
       labels:
         app: contextservice
     spec:
@@ -52,11 +54,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:1010"]
         resources:
           requests:
-            cpu: 50m
-            memory: 64Mi
+            cpu: 250m
+            memory: 128Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 1000m
+            memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
@@ -77,3 +79,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: contextservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: contextservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
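+        # Utilization is computed relative to the CPU requests declared in the Deployment above.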
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
index ca2c81f0f2e5d874066464ab0537adeec734cfbb..ad54f4b6c2682c381c1c5238a013e1d12e177764 100644
--- a/manifests/deviceservice.yaml
+++ b/manifests/deviceservice.yaml
@@ -45,11 +45,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:2020"]
         resources:
           requests:
-            cpu: 50m
+            cpu: 128m
             memory: 64Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 256m
+            memory: 128Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/load_generatorservice.yaml b/manifests/load_generatorservice.yaml
index b94e11e725757fa2ec67de19f98ecfa6a03f085b..3f65c2c857a39f2b7a5ebeaccd9ddfd4916f2487 100644
--- a/manifests/load_generatorservice.yaml
+++ b/manifests/load_generatorservice.yaml
@@ -44,11 +44,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:50052"]
         resources:
           requests:
-            cpu: 50m
+            cpu: 256m
             memory: 64Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 512m
+            memory: 128Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml
index fd3599f429f48ebb3cf3f8d802f8f61f00e1b41d..3ba12750b20a7093a570748e67a93922316a66f6 100644
--- a/manifests/pathcompservice.yaml
+++ b/manifests/pathcompservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: pathcompservice
-  replicas: 1
+  #replicas: 1
   template:
     metadata:
       labels:
@@ -53,6 +53,8 @@ spec:
       - name: backend
         image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest
         imagePullPolicy: Always
+        ports:
+        - containerPort: 8081
         #readinessProbe:
         #  httpGet:
         #    path: /health
@@ -96,3 +98,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: pathcompservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: pathcompservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/policyservice.yaml b/manifests/policyservice.yaml
deleted file mode 120000
index bb28f6e2cff4c6b50e44f049dec6a53d31922e86..0000000000000000000000000000000000000000
--- a/manifests/policyservice.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../src/policy/target/kubernetes/kubernetes.yml
\ No newline at end of file
diff --git a/manifests/policyservice.yaml b/manifests/policyservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..72da09ecaf1de9d080d686c63c0f18c88f09e8b4
--- /dev/null
+++ b/manifests/policyservice.yaml
@@ -0,0 +1,129 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+    app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
+  labels:
+    app.kubernetes.io/name: policyservice
+    app: policyservice
+  name: policyservice
+spec:
+  ports:
+    - name: metrics
+      port: 9192
+      targetPort: 8080
+    - name: grpc
+      port: 6060
+      targetPort: 6060
+  selector:
+    app.kubernetes.io/name: policyservice
+  type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+    app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
+  labels:
+    app: policyservice
+    app.kubernetes.io/name: policyservice
+  name: policyservice
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: policyservice
+  template:
+    metadata:
+      annotations:
+        app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+        app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
+      labels:
+        app: policyservice
+        app.kubernetes.io/name: policyservice
+    spec:
+      containers:
+        - env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: SERVICE_SERVICE_HOST
+              value: serviceservice
+            - name: CONTEXT_SERVICE_HOST
+              value: contextservice
+            - name: MONITORING_SERVICE_HOST
+              value: monitoringservice
+          image: labs.etsi.org:5050/tfs/controller/policy:0.1.0
+          imagePullPolicy: Always
+          livenessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/live
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          name: policyservice
+          ports:
+            - containerPort: 8080
+              name: metrics
+              protocol: TCP
+            - containerPort: 6060
+              name: grpc-server
+              protocol: TCP
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/ready
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          resources:
+            requests:
+              cpu: 50m
+              memory: 512Mi
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: policyservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: policyservice
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
\ No newline at end of file
diff --git a/manifests/servicemonitors.yaml b/manifests/servicemonitors.yaml
index 06c3390f4fddbcb6f8adec5d931989cc8a41cc68..3d38d59603918b7ea35004e580e21b8a03ce2878 100644
--- a/manifests/servicemonitors.yaml
+++ b/manifests/servicemonitors.yaml
@@ -243,3 +243,61 @@ spec:
     any: false
     matchNames:
     - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-policyservice-metric
+  labels:
+    app: policyservice
+    #release: prometheus
+    #release: prom  # name of the Prometheus release
+    # ( VERY IMPORTANT: the release label must match the one used by the
+    #   ServiceMonitor of Prometheus itself; without the correct name,
+    #   Prometheus cannot identify the metrics of this service as a target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: policyservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /q/metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-automationservice-metric
+  labels:
+    app: automationservice
+    #release: prometheus
+    #release: prom  # name of the Prometheus release
+    # ( VERY IMPORTANT: the release label must match the one used by the
+    #   ServiceMonitor of Prometheus itself; without the correct name,
+    #   Prometheus cannot identify the metrics of this service as a target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: automationservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /q/metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml
index 3fa4a6e0dc256ba964fd4ee26a8b7095bb2303f4..ce90aa18854522f1c08e213cb554c70af70bac36 100644
--- a/manifests/serviceservice.yaml
+++ b/manifests/serviceservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: serviceservice
-  replicas: 1
+  #replicas: 1
   template:
     metadata:
       labels:
@@ -45,11 +45,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:3030"]
         resources:
           requests:
-            cpu: 50m
-            memory: 64Mi
+            cpu: 32m
+            memory: 32Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 128m
+            memory: 64Mi
 ---
 apiVersion: v1
 kind: Service
@@ -70,3 +70,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: serviceservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: serviceservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index 49e2b5943d20586941f80e8fc4b5c32c99d70f8e..8f312e8e0c89c5b8ed1923622078ea16b6bd876e 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: sliceservice
-  replicas: 1
+  #replicas: 1
   template:
     metadata:
       labels:
@@ -50,11 +50,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:4040"]
         resources:
           requests:
-            cpu: 50m
-            memory: 64Mi
+            cpu: 32m
+            memory: 128Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 128m
+            memory: 256Mi
 ---
 apiVersion: v1
 kind: Service
@@ -75,3 +75,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: sliceservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: sliceservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index 234075f738abd880a7c269cb07b72ad6c635d4c6..b6ddfc0a91ae5316969079c517e148f63fb18b61 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -62,7 +62,7 @@ spec:
             cpu: 500m
             memory: 512Mi
       - name: grafana
-        image: grafana/grafana:8.5.11
+        image: grafana/grafana:8.5.22
         imagePullPolicy: IfNotPresent
         ports:
           - containerPort: 3000
diff --git a/my_deploy.sh b/my_deploy.sh
index 22a7ae8155135f8d81f2fa12d71f80d8dd7c57e9..d6f3513e9b2090905b7814c4563644ecda7bd2c6 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -29,7 +29,7 @@ export TFS_IMAGE_TAG="dev"
 export TFS_K8S_NAMESPACE="tfs"
 
 # Set additional manifest files to be applied after the deployment
-export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml manifests/servicemonitors.yaml"
 
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
@@ -115,3 +115,12 @@ export QDB_DROP_TABLES_IF_EXIST=""
 
 # Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port the Prometheus Mgmt HTTP GUI will be exposed on.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port the Grafana HTTP Dashboards will be exposed on.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/proto/context.proto b/proto/context.proto
index 2dfbb7805eb444ee94e27bb00ca05d9a1c83b8ec..3b25e6361766ee4c2b52e15aab215409f40cbb56 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -40,7 +40,7 @@ service ContextService {
   rpc SetDevice          (Device        ) returns (       DeviceId        ) {}
   rpc RemoveDevice       (DeviceId      ) returns (       Empty           ) {}
   rpc GetDeviceEvents    (Empty         ) returns (stream DeviceEvent     ) {}
-
+  rpc SelectDevice       (DeviceFilter  ) returns (       DeviceList      ) {}
   rpc ListEndPointNames  (EndPointIdList) returns (       EndPointNameList) {}
 
   rpc ListLinkIds        (Empty         ) returns (       LinkIdList      ) {}
@@ -57,6 +57,7 @@ service ContextService {
   rpc UnsetService       (Service       ) returns (       ServiceId       ) {}
   rpc RemoveService      (ServiceId     ) returns (       Empty           ) {}
   rpc GetServiceEvents   (Empty         ) returns (stream ServiceEvent    ) {}
+  rpc SelectService      (ServiceFilter ) returns (       ServiceList     ) {}
 
   rpc ListSliceIds       (ContextId     ) returns (       SliceIdList     ) {}
   rpc ListSlices         (ContextId     ) returns (       SliceList       ) {}
@@ -65,6 +66,7 @@ service ContextService {
   rpc UnsetSlice         (Slice         ) returns (       SliceId         ) {}
   rpc RemoveSlice        (SliceId       ) returns (       Empty           ) {}
   rpc GetSliceEvents     (Empty         ) returns (stream SliceEvent      ) {}
+  rpc SelectSlice        (SliceFilter   ) returns (       SliceList       ) {}
 
   rpc ListConnectionIds  (ServiceId     ) returns (       ConnectionIdList) {}
   rpc ListConnections    (ServiceId     ) returns (       ConnectionList  ) {}
@@ -208,6 +210,13 @@ message DeviceList {
   repeated Device devices = 1;
 }
 
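+// DeviceFilter selects a set of devices by ID and controls which nested
+// collections (endpoints, config rules, components) are returned.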
+message DeviceFilter {
+  DeviceIdList device_ids = 1;
+  bool include_endpoints = 2;
+  bool include_config_rules = 3;
+  bool include_components = 4;
+}
+
 message DeviceEvent {
   Event event = 1;
   DeviceId device_id = 2;
@@ -288,6 +297,13 @@ message ServiceList {
   repeated Service services = 1;
 }
 
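+// ServiceFilter selects a set of services by ID and controls which nested
+// collections (endpoint IDs, constraints, config rules) are returned.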
+message ServiceFilter {
+  ServiceIdList service_ids = 1;
+  bool include_endpoint_ids = 2;
+  bool include_constraints = 3;
+  bool include_config_rules = 4;
+}
+
 message ServiceEvent {
   Event event = 1;
   ServiceId service_id = 2;
@@ -342,6 +358,15 @@ message SliceList {
   repeated Slice slices = 1;
 }
 
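+// SliceFilter selects a set of slices by ID and controls which nested
+// collections (endpoint IDs, constraints, service/subslice IDs, config rules) are returned.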
+message SliceFilter {
+  SliceIdList slice_ids = 1;
+  bool include_endpoint_ids = 2;
+  bool include_constraints = 3;
+  bool include_service_ids = 4;
+  bool include_subslice_ids = 5;
+  bool include_config_rules = 6;
+}
+
 message SliceEvent {
   Event event = 1;
   SliceId slice_id = 2;
diff --git a/scripts/old/open_dashboard.sh b/scripts/old/open_dashboard.sh
old mode 100755
new mode 100644
index 4ea206f4538c27fe8563ce5c30ed837781f8d362..2ff15684a499fe390816ebb8e4859cad49d43d32
--- a/scripts/old/open_dashboard.sh
+++ b/scripts/old/open_dashboard.sh
@@ -16,9 +16,7 @@
 
 # this script opens the dashboard
 
-K8S_NAMESPACE=${K8S_NAMESPACE:-'tfs'}
-
-GRAFANA_IP=$(kubectl get service/webuiservice -n ${TFS_K8S_NAMESPACE} -o jsonpath='{.spec.clusterIP}')
+GRAFANA_IP=$(kubectl get service/grafana -n monitoring -o jsonpath='{.spec.clusterIP}')
 GRAFANA_PORT=3000 #$(kubectl get service webuiservice --namespace $TFS_K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==3000)].nodePort}')
 URL=http://${GRAFANA_IP}:${GRAFANA_PORT}
 
diff --git a/src/automation/pom.xml b/src/automation/pom.xml
index 2fd5fd263a698145f39c37ed358982de58dfee77..7dfc3dac438fa5df740381be0ef595a5734d7699 100644
--- a/src/automation/pom.xml
+++ b/src/automation/pom.xml
@@ -174,6 +174,11 @@
       <scope>test</scope>
     </dependency>
 
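+    <!-- SmallRye Metrics: exposes the @Counted/@Timed metrics at /q/metrics for the Prometheus ServiceMonitors -->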
+    <dependency>
+      <groupId>io.quarkus</groupId>
+      <artifactId>quarkus-smallrye-metrics</artifactId>
+    </dependency>
+
   </dependencies>
 
   <build>
diff --git a/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java b/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java
index 51857bb3dba6422fe6ffc93930e0e2bf65b1a223..2f9054cd8296579b3e391aae84ec16ad1f460bdb 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java
@@ -27,6 +27,10 @@ import io.quarkus.grpc.GrpcService;
 import io.smallrye.mutiny.Uni;
 import javax.inject.Inject;
 
+import org.eclipse.microprofile.metrics.MetricUnits;
+import org.eclipse.microprofile.metrics.annotation.Counted;
+import org.eclipse.microprofile.metrics.annotation.Timed;
+
 @GrpcService
 public class AutomationGatewayImpl implements AutomationGateway {
 
@@ -40,18 +44,24 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpGetDeviceRole_counter")
+    @Timed(name = "automation_ztpGetDeviceRole_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRole> ztpGetDeviceRole(Automation.DeviceRoleId request) {
         return Uni.createFrom()
                 .item(() -> Automation.DeviceRole.newBuilder().setDevRoleId(request).build());
     }
 
     @Override
+    @Counted(name = "automation_ztpGetDeviceRolesByDeviceId_counter")
+    @Timed(name = "automation_ztpGetDeviceRolesByDeviceId_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRoleList> ztpGetDeviceRolesByDeviceId(
             ContextOuterClass.DeviceId request) {
         return Uni.createFrom().item(() -> Automation.DeviceRoleList.newBuilder().build());
     }
 
     @Override
+    @Counted(name = "automation_ztpAdd_counter")
+    @Timed(name = "automation_ztpAdd_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRoleState> ztpAdd(Automation.DeviceRole request) {
         final var devRoleId = request.getDevRoleId().getDevRoleId().getUuid();
         final var deviceId = serializer.deserialize(request.getDevRoleId().getDevId());
@@ -63,6 +73,8 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpUpdate_counter")
+    @Timed(name = "automation_ztpUpdate_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<DeviceRoleState> ztpUpdate(DeviceRoleConfig request) {
         final var devRoleId = request.getDevRole().getDevRoleId().getDevRoleId().getUuid();
         final var deviceId = serializer.deserialize(request.getDevRole().getDevRoleId().getDevId());
@@ -75,6 +87,8 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpDelete_counter")
+    @Timed(name = "automation_ztpDelete_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRoleState> ztpDelete(Automation.DeviceRole request) {
         final var devRoleId = request.getDevRoleId().getDevRoleId().getUuid();
         return automationService
@@ -84,6 +98,8 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpDeleteAll_counter")
+    @Timed(name = "automation_ztpDeleteAll_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceDeletionResult> ztpDeleteAll(ContextOuterClass.Empty empty) {
         return Uni.createFrom().item(() -> Automation.DeviceDeletionResult.newBuilder().build());
     }
diff --git a/src/automation/target/kubernetes/kubernetes.yml b/src/automation/target/kubernetes/kubernetes.yml
index 4dacf3998c3991a441dc374ca6c6abc29e8d3b80..73e6b1d7be076dbcf55014ae3accbc1e29e0c8e8 100644
--- a/src/automation/target/kubernetes/kubernetes.yml
+++ b/src/automation/target/kubernetes/kubernetes.yml
@@ -27,8 +27,9 @@ spec:
     - name: grpc
       port: 5050
       targetPort: 5050
-    - name: http
-      port: 8080
+    - name: metrics
+      protocol: TCP
+      port: 9192
       targetPort: 8080
   selector:
     app.kubernetes.io/name: automationservice
@@ -84,7 +85,7 @@ spec:
               name: grpc
               protocol: TCP
             - containerPort: 8080
-              name: http
+              name: metrics
               protocol: TCP
           readinessProbe:
             failureThreshold: 3
@@ -96,3 +97,29 @@ spec:
             periodSeconds: 10
             successThreshold: 1
             timeoutSeconds: 10
+          resources:
+            requests:
+              cpu: 50m
+              memory: 512Mi
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: automationservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: automationservice
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
diff --git a/src/common/message_broker/backend/nats/NatsBackendThread.py b/src/common/message_broker/backend/nats/NatsBackendThread.py
index e59e4d6835ef662e4b0ed9f92d79a45c22954a6f..0bedd2b242f7eeaa1585d0eb41c5a0bd9efe07e5 100644
--- a/src/common/message_broker/backend/nats/NatsBackendThread.py
+++ b/src/common/message_broker/backend/nats/NatsBackendThread.py
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import asyncio, nats, nats.errors, queue, threading
+import asyncio, logging, nats, nats.errors, queue, threading
 from typing import List
 from common.message_broker.Message import Message
 
+LOGGER = logging.getLogger(__name__)
+
 class NatsBackendThread(threading.Thread):
     def __init__(self, nats_uri : str) -> None:
         self._nats_uri = nats_uri
@@ -32,7 +34,9 @@ class NatsBackendThread(threading.Thread):
         self._tasks_terminated.set()
 
     async def _run_publisher(self) -> None:
+        LOGGER.info('[_run_publisher] NATS URI: {:s}'.format(str(self._nats_uri)))
         client = await nats.connect(servers=[self._nats_uri])
+        LOGGER.info('[_run_publisher] Connected!')
         while not self._terminate.is_set():
             try:
                 message : Message = await self._publish_queue.get()
@@ -47,8 +51,11 @@ class NatsBackendThread(threading.Thread):
     async def _run_subscriber(
         self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event
     ) -> None:
+        LOGGER.info('[_run_subscriber] NATS URI: {:s}'.format(str(self._nats_uri)))
         client = await nats.connect(servers=[self._nats_uri])
+        LOGGER.info('[_run_subscriber] Connected!')
         subscription = await client.subscribe(topic_name)
+        LOGGER.info('[_run_subscriber] Subscribed!')
         while not self._terminate.is_set() and not unsubscribe.is_set():
             try:
                 message = await subscription.next_msg(timeout)
diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py
index 0e1d8c7371e87b47bfc47a4242e00039add48e7f..1e238510c98b83bebde8167711b988d7476e5a99 100644
--- a/src/common/tools/descriptor/Loader.py
+++ b/src/common/tools/descriptor/Loader.py
@@ -222,13 +222,13 @@ class DescriptorLoader:
         self.__topologies_add = get_descriptors_add_topologies(self.__topologies)
 
         if self.__dummy_mode:
-            self._dummy_mode()
+            self._load_dummy_mode()
         else:
-            self._normal_mode()
+            self._load_normal_mode()
         
         return self.__results
 
-    def _dummy_mode(self) -> None:
+    def _load_dummy_mode(self) -> None:
         # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
         self.__ctx_cli.connect()
         self._process_descr('context',    'add',    self.__ctx_cli.SetContext,    Context,    self.__contexts_add  )
@@ -242,7 +242,7 @@ class DescriptorLoader:
         self._process_descr('topology',   'update', self.__ctx_cli.SetTopology,   Topology,   self.__topologies    )
         #self.__ctx_cli.close()
 
-    def _normal_mode(self) -> None:
+    def _load_normal_mode(self) -> None:
         # Normal mode: follows the automated workflows in the different components
         assert len(self.__connections) == 0, 'in normal mode, connections should not be set'
 
@@ -321,7 +321,35 @@ class DescriptorLoader:
             response = self.__ctx_cli.ListSlices(ContextId(**json_context_id(context_uuid)))
             assert len(response.slices) == num_slices
 
-    def unload(self) -> None:
+    def _unload_dummy_mode(self) -> None:
+        # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
+        self.__ctx_cli.connect()
+
+        for _, slice_list in self.slices.items():
+            for slice_ in slice_list:
+                self.__ctx_cli.RemoveSlice(SliceId(**slice_['slice_id']))
+
+        for _, service_list in self.services.items():
+            for service in service_list:
+                self.__ctx_cli.RemoveService(ServiceId(**service['service_id']))
+
+        for link in self.links:
+            self.__ctx_cli.RemoveLink(LinkId(**link['link_id']))
+
+        for device in self.devices:
+            self.__ctx_cli.RemoveDevice(DeviceId(**device['device_id']))
+
+        for _, topology_list in self.topologies.items():
+            for topology in topology_list:
+                self.__ctx_cli.RemoveTopology(TopologyId(**topology['topology_id']))
+
+        for context in self.contexts:
+            self.__ctx_cli.RemoveContext(ContextId(**context['context_id']))
+
+        #self.__ctx_cli.close()
+
+    def _unload_normal_mode(self) -> None:
+        # Normal mode: follows the automated workflows in the different components
         self.__ctx_cli.connect()
         self.__dev_cli.connect()
         self.__svc_cli.connect()
@@ -348,6 +376,17 @@ class DescriptorLoader:
         for context in self.contexts:
             self.__ctx_cli.RemoveContext(ContextId(**context['context_id']))
 
+        #self.__ctx_cli.close()
+        #self.__dev_cli.close()
+        #self.__svc_cli.close()
+        #self.__slc_cli.close()
+
+    def unload(self) -> None:
+        if self.__dummy_mode:
+            self._unload_dummy_mode()
+        else:
+            self._unload_normal_mode()
+
 def compose_notifications(results : TypeResults) -> TypeNotificationList:
     notifications = []
     for entity_name, action_name, num_ok, error_list in results:
diff --git a/src/context/client/ContextClient.py b/src/context/client/ContextClient.py
index 7c3832d6b3ea7de0a495faee143b73179e8da5b9..13d9dc0035b45845bf11367e02c8830b5151c1d6 100644
--- a/src/context/client/ContextClient.py
+++ b/src/context/client/ContextClient.py
@@ -21,11 +21,11 @@ from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.proto.context_pb2 import (
     Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
     Context, ContextEvent, ContextId, ContextIdList, ContextList,
-    Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
+    Device, DeviceEvent, DeviceFilter, DeviceId, DeviceIdList, DeviceList,
     Empty, EndPointIdList, EndPointNameList,
     Link, LinkEvent, LinkId, LinkIdList, LinkList,
-    Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
-    Slice, SliceEvent, SliceId, SliceIdList, SliceList,
+    Service, ServiceEvent, ServiceFilter, ServiceId, ServiceIdList, ServiceList,
+    Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
     Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
 from common.proto.context_pb2_grpc import ContextServiceStub
 from common.proto.context_policy_pb2_grpc import ContextPolicyServiceStub
@@ -185,6 +185,13 @@ class ContextClient:
         LOGGER.debug('RemoveDevice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def SelectDevice(self, request: DeviceFilter) -> DeviceList:
+        LOGGER.debug('SelectDevice request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectDevice(request)
+        LOGGER.debug('SelectDevice result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def GetDeviceEvents(self, request: Empty) -> Iterator[DeviceEvent]:
         LOGGER.debug('GetDeviceEvents request: {:s}'.format(grpc_message_to_json_string(request)))
@@ -283,6 +290,13 @@ class ContextClient:
         LOGGER.debug('RemoveService result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def SelectService(self, request: ServiceFilter) -> ServiceList:
+        LOGGER.debug('SelectService request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectService(request)
+        LOGGER.debug('SelectService result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def GetServiceEvents(self, request: Empty) -> Iterator[ServiceEvent]:
         LOGGER.debug('GetServiceEvents request: {:s}'.format(grpc_message_to_json_string(request)))
@@ -332,6 +346,13 @@ class ContextClient:
         LOGGER.debug('RemoveSlice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def SelectSlice(self, request: SliceFilter) -> SliceList:
+        LOGGER.debug('SelectSlice request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectSlice(request)
+        LOGGER.debug('SelectSlice result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def GetSliceEvents(self, request: Empty) -> Iterator[SliceEvent]:
         LOGGER.debug('GetSliceEvents request: {:s}'.format(grpc_message_to_json_string(request)))
diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py
index 6fe00f917cf8b338f0934e2a268fa757d2055865..789ee7a78c6bcff3e62a6dd373bd58dbb2e7a960 100644
--- a/src/context/service/ContextServiceServicerImpl.py
+++ b/src/context/service/ContextServiceServicerImpl.py
@@ -18,11 +18,11 @@ from common.message_broker.MessageBroker import MessageBroker
 from common.proto.context_pb2 import (
     Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
     Context, ContextEvent, ContextId, ContextIdList, ContextList,
-    Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
+    Device, DeviceEvent, DeviceFilter, DeviceId, DeviceIdList, DeviceList,
     Empty, EndPointIdList, EndPointNameList, EventTypeEnum,
     Link, LinkEvent, LinkId, LinkIdList, LinkList,
-    Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
-    Slice, SliceEvent, SliceId, SliceIdList, SliceList,
+    Service, ServiceEvent, ServiceFilter, ServiceId, ServiceIdList, ServiceList,
+    Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
     Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
 from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule
 from common.proto.context_pb2_grpc import ContextServiceServicer
@@ -31,13 +31,13 @@ from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_m
 from .database.Connection import (
     connection_delete, connection_get, connection_list_ids, connection_list_objs, connection_set)
 from .database.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set
-from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_set
+from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_select, device_set
 from .database.EndPoint import endpoint_list_names
 from .database.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set
 from .database.PolicyRule import (
     policyrule_delete, policyrule_get, policyrule_list_ids, policyrule_list_objs, policyrule_set)
-from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set
-from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_set, slice_unset
+from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_select, service_set
+from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_select, slice_set, slice_unset
 from .database.Topology import (
     topology_delete, topology_get, topology_get_details, topology_list_ids, topology_list_objs, topology_set)
 from .Events import (
@@ -161,6 +161,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
             notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id})
         return Empty()
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectDevice(self, request : DeviceFilter, context : grpc.ServicerContext) -> DeviceList:
+        return DeviceList(devices=device_select(self.db_engine, request))
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetDeviceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
         for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT):
@@ -235,6 +239,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
             notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id})
         return Empty()
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectService(self, request : ServiceFilter, context : grpc.ServicerContext) -> ServiceList:
+        return ServiceList(services=service_select(self.db_engine, request))
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
         for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT):
@@ -278,6 +286,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
             notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id})
         return Empty()
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectSlice(self, request : SliceFilter, context : grpc.ServicerContext) -> SliceList:
+        return SliceList(slices=slice_select(self.db_engine, request))
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetSliceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]:
         for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT):
diff --git a/src/context/service/database/Connection.py b/src/context/service/database/Connection.py
index a3edb8ea2838d9203a810677da495893a2cd6973..80d3b3a6d437986741ee5308205d8a902e897c40 100644
--- a/src/context/service/database/Connection.py
+++ b/src/context/service/database/Connection.py
@@ -16,7 +16,7 @@ import datetime, logging, re
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
 from sqlalchemy.exc import IntegrityError
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
 from common.proto.context_pb2 import Connection, ConnectionId, ServiceId
@@ -40,7 +40,11 @@ def connection_list_ids(db_engine : Engine, request : ServiceId) -> List[Dict]:
 def connection_list_objs(db_engine : Engine, request : ServiceId) -> List[Dict]:
     _,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[ConnectionModel] = session.query(ConnectionModel).filter_by(service_uuid=service_uuid).all()
+        obj_list : List[ConnectionModel] = session.query(ConnectionModel)\
+            .options(selectinload(ConnectionModel.connection_service))\
+            .options(selectinload(ConnectionModel.connection_endpoints))\
+            .options(selectinload(ConnectionModel.connection_subservices))\
+            .filter_by(service_uuid=service_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
@@ -48,6 +52,9 @@ def connection_get(db_engine : Engine, request : ConnectionId) -> Dict:
     connection_uuid = connection_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[ConnectionModel] = session.query(ConnectionModel)\
+            .options(selectinload(ConnectionModel.connection_service))\
+            .options(selectinload(ConnectionModel.connection_endpoints))\
+            .options(selectinload(ConnectionModel.connection_subservices))\
             .filter_by(connection_uuid=connection_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Context.py b/src/context/service/database/Context.py
index 9e05e54b38d3772ece2d87de0d98fb5a216088de..4654095034749e1de985705b242ba9fa05a82f6a 100644
--- a/src/context/service/database/Context.py
+++ b/src/context/service/database/Context.py
@@ -15,7 +15,7 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
 from common.proto.context_pb2 import Context, ContextId
@@ -34,14 +34,22 @@ def context_list_ids(db_engine : Engine) -> List[Dict]:
 
 def context_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[ContextModel] = session.query(ContextModel).all()
+        obj_list : List[ContextModel] = session.query(ContextModel)\
+            .options(selectinload(ContextModel.topologies))\
+            .options(selectinload(ContextModel.services))\
+            .options(selectinload(ContextModel.slices))\
+            .all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def context_get(db_engine : Engine, request : ContextId) -> Dict:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[ContextModel] = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none()
+        obj : Optional[ContextModel] = session.query(ContextModel)\
+            .options(selectinload(ContextModel.topologies))\
+            .options(selectinload(ContextModel.services))\
+            .options(selectinload(ContextModel.slices))\
+            .filter_by(context_uuid=context_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py
index c5a19c9c4b0bca4f85ffe1211dbefc6b218d518e..3e106bc158ab804c7eada7284e9d1b883eb66264 100644
--- a/src/context/service/database/Device.py
+++ b/src/context/service/database/Device.py
@@ -15,12 +15,12 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
-from common.proto.context_pb2 import Device, DeviceId, TopologyId
-from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.proto.context_pb2 import Device, DeviceFilter, DeviceId, TopologyId
+#from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Device import json_device_id
 from context.service.database.uuids.Topology import topology_get_uuid
 from .models.DeviceModel import DeviceModel
@@ -43,14 +43,22 @@ def device_list_ids(db_engine : Engine) -> List[Dict]:
 
 def device_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[DeviceModel] = session.query(DeviceModel).all()
+        obj_list : List[DeviceModel] = session.query(DeviceModel)\
+            .options(selectinload(DeviceModel.endpoints))\
+            .options(selectinload(DeviceModel.config_rules))\
+            .all()
+            #.options(selectinload(DeviceModel.components))\
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def device_get(db_engine : Engine, request : DeviceId) -> Dict:
     device_uuid = device_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[DeviceModel] = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none()
+        obj : Optional[DeviceModel] = session.query(DeviceModel)\
+            .options(selectinload(DeviceModel.endpoints))\
+            .options(selectinload(DeviceModel.config_rules))\
+            .filter_by(device_uuid=device_uuid).one_or_none()
+            #.options(selectinload(DeviceModel.components))\
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -163,7 +171,9 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]:
             endpoint_updates = session.execute(stmt).fetchall()
             updated_endpoints = any([(updated_at > created_at) for created_at,updated_at in endpoint_updates])
 
-        if len(related_topologies) > 0:
+        if not updated or len(related_topologies) > 1:
+            # Only update topology-device relations when the device is being created (not updated) or when
+            # its endpoints changed (len(related_topologies) > 1).
             session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing(
                 index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid]
             ))
@@ -182,3 +192,22 @@ def device_delete(db_engine : Engine, request : DeviceId) -> Tuple[Dict, bool]:
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_device_id(device_uuid),deleted
+
+def device_select(db_engine : Engine, request : DeviceFilter) -> List[Dict]:
+    device_uuids = [
+        device_get_uuid(device_id, allow_random=False)
+        for device_id in request.device_ids.device_ids
+    ]
+    dump_params = dict(
+        include_endpoints   =request.include_endpoints,
+        include_config_rules=request.include_config_rules,
+        include_components  =request.include_components,
+    )
+    def callback(session : Session) -> List[Dict]:
+        query = session.query(DeviceModel)
+        if request.include_endpoints   : query = query.options(selectinload(DeviceModel.endpoints))
+        if request.include_config_rules: query = query.options(selectinload(DeviceModel.config_rules))
+        #if request.include_components  : query = query.options(selectinload(DeviceModel.components))
+        obj_list : List[DeviceModel] = query.filter(DeviceModel.device_uuid.in_(device_uuids)).all()
+        return [obj.dump(**dump_params) for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
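
The new `device_select()` lets callers fetch a batch of devices while choosing which heavy relations to load and dump. A hypothetical caller-side sketch, assuming the TeraFlow proto stubs; field names are taken from the proto usage above and the helper is the `device_select()` defined in this file:

```python
# Hypothetical caller-side sketch (not part of the patch).
from typing import List
from common.proto.context_pb2 import DeviceFilter

def build_device_filter(device_uuids : List[str], with_config_rules : bool = False) -> DeviceFilter:
    request = DeviceFilter()
    for device_uuid in device_uuids:
        device_id = request.device_ids.device_ids.add()   # repeated DeviceId
        device_id.device_uuid.uuid = device_uuid
    request.include_endpoints    = True
    request.include_config_rules = with_config_rules
    request.include_components   = False
    return request

#devices = device_select(db_engine, build_device_filter(['dev-1', 'dev-2']))
```
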
diff --git a/src/context/service/database/EndPoint.py b/src/context/service/database/EndPoint.py
index e2f86893abdf62c9675a83b2a80ceed1227b85d4..b0df3bb8101a7b64a148e916178b1c9a77d511af 100644
--- a/src/context/service/database/EndPoint.py
+++ b/src/context/service/database/EndPoint.py
@@ -14,7 +14,7 @@
 
 import logging
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List
 from common.proto.context_pb2 import EndPointIdList
@@ -29,7 +29,8 @@ def endpoint_list_names(db_engine : Engine, request : EndPointIdList) -> List[Di
         for endpoint_id in request.endpoint_ids
     }
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[EndPointModel] = \
-            session.query(EndPointModel).filter(EndPointModel.endpoint_uuid.in_(endpoint_uuids)).all()
+        obj_list : List[EndPointModel] = session.query(EndPointModel)\
+            .options(selectinload(EndPointModel.device))\
+            .filter(EndPointModel.endpoint_uuid.in_(endpoint_uuids)).all()
         return [obj.dump_name() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py
index 299827dbdde6f9961d55be5f69f4e359f4e876a7..f5bfc9dea5fb81fa8becfedc8ce1e4e0f59e7292 100644
--- a/src/context/service/database/Link.py
+++ b/src/context/service/database/Link.py
@@ -15,7 +15,7 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.proto.context_pb2 import Link, LinkId
@@ -36,14 +36,18 @@ def link_list_ids(db_engine : Engine) -> List[Dict]:
 
 def link_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[LinkModel] = session.query(LinkModel).all()
+        obj_list : List[LinkModel] = session.query(LinkModel)\
+            .options(selectinload(LinkModel.link_endpoints))\
+            .all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def link_get(db_engine : Engine, request : LinkId) -> Dict:
     link_uuid = link_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[LinkModel] = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none()
+        obj : Optional[LinkModel] = session.query(LinkModel)\
+            .options(selectinload(LinkModel.link_endpoints))\
+            .filter_by(link_uuid=link_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py
index e95cec4ae533795b23b8fd4e2f26ac9000c1bcce..13f0a2698c17874e1e15f4d6a1d527d366141f56 100644
--- a/src/context/service/database/PolicyRule.py
+++ b/src/context/service/database/PolicyRule.py
@@ -15,7 +15,7 @@
 import datetime, json
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.proto.policy_pb2 import PolicyRule, PolicyRuleId, PolicyRuleIdList, PolicyRuleList
@@ -31,14 +31,15 @@ from .uuids.Service import service_get_uuid
 def policyrule_list_ids(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all()
-        #.options(selectinload(PolicyRuleModel.topology)).filter_by(context_uuid=context_uuid).one_or_none()
         return [obj.dump_id() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def policyrule_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all()
-        #.options(selectinload(PolicyRuleModel.topology)).filter_by(context_uuid=context_uuid).one_or_none()
+        obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel)\
+            .options(selectinload(PolicyRuleModel.policyrule_service))\
+            .options(selectinload(PolicyRuleModel.policyrule_devices))\
+            .all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
@@ -46,6 +47,8 @@ def policyrule_get(db_engine : Engine, request : PolicyRuleId) -> PolicyRule:
     policyrule_uuid = policyrule_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[PolicyRuleModel] = session.query(PolicyRuleModel)\
+            .options(selectinload(PolicyRuleModel.policyrule_service))\
+            .options(selectinload(PolicyRuleModel.policyrule_devices))\
             .filter_by(policyrule_uuid=policyrule_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py
index fe12eaf8a011e2ae4861c64cdfac8c4b9c388731..32484a3095c3d937392f580597339fe047d36e3f 100644
--- a/src/context/service/database/Service.py
+++ b/src/context/service/database/Service.py
@@ -15,10 +15,10 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
-from common.proto.context_pb2 import ContextId, Service, ServiceId
+from common.proto.context_pb2 import ContextId, Service, ServiceFilter, ServiceId
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Service import json_service_id
@@ -43,14 +43,22 @@ def service_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
 def service_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all()
+        obj_list : List[ServiceModel] = session.query(ServiceModel)\
+            .options(selectinload(ServiceModel.service_endpoints))\
+            .options(selectinload(ServiceModel.constraints))\
+            .options(selectinload(ServiceModel.config_rules))\
+            .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def service_get(db_engine : Engine, request : ServiceId) -> Dict:
     _,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[ServiceModel] = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none()
+        obj : Optional[ServiceModel] = session.query(ServiceModel)\
+            .options(selectinload(ServiceModel.service_endpoints))\
+            .options(selectinload(ServiceModel.constraints))\
+            .options(selectinload(ServiceModel.config_rules))\
+            .filter_by(service_uuid=service_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -145,3 +153,22 @@ def service_delete(db_engine : Engine, request : ServiceId) -> Tuple[Dict, bool]
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_service_id(service_uuid, json_context_id(context_uuid)),deleted
+
+def service_select(db_engine : Engine, request : ServiceFilter) -> List[Dict]:
+    service_uuids = [
+        service_get_uuid(service_id, allow_random=False)[1]
+        for service_id in request.service_ids.service_ids
+    ]
+    dump_params = dict(
+        include_endpoint_ids=request.include_endpoint_ids,
+        include_constraints =request.include_constraints,
+        include_config_rules=request.include_config_rules,
+    )
+    def callback(session : Session) -> List[Dict]:
+        query = session.query(ServiceModel)
+        if request.include_endpoint_ids: query = query.options(selectinload(ServiceModel.service_endpoints))
+        if request.include_constraints : query = query.options(selectinload(ServiceModel.constraints))
+        if request.include_config_rules: query = query.options(selectinload(ServiceModel.config_rules))
+        obj_list : List[ServiceModel] = query.filter(ServiceModel.service_uuid.in_(service_uuids)).all()
+        return [obj.dump(**dump_params) for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py
index 724046bfae16ea8e75ba84b83ff52c1050242003..abd140024f2a13289c7af6a3bafe363a8247e053 100644
--- a/src/context/service/database/Slice.py
+++ b/src/context/service/database/Slice.py
@@ -16,10 +16,10 @@ import datetime, logging
 from sqlalchemy import and_
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
-from common.proto.context_pb2 import ContextId, Slice, SliceId
+from common.proto.context_pb2 import ContextId, Slice, SliceFilter, SliceId
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Slice import json_slice_id
@@ -44,14 +44,26 @@ def slice_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
 def slice_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[SliceModel] = session.query(SliceModel).filter_by(context_uuid=context_uuid).all()
+        obj_list : List[SliceModel] = session.query(SliceModel)\
+            .options(selectinload(SliceModel.slice_endpoints))\
+            .options(selectinload(SliceModel.slice_services))\
+            .options(selectinload(SliceModel.slice_subslices))\
+            .options(selectinload(SliceModel.constraints))\
+            .options(selectinload(SliceModel.config_rules))\
+            .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def slice_get(db_engine : Engine, request : SliceId) -> Dict:
     _,slice_uuid = slice_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[SliceModel] = session.query(SliceModel).filter_by(slice_uuid=slice_uuid).one_or_none()
+        obj : Optional[SliceModel] = session.query(SliceModel)\
+            .options(selectinload(SliceModel.slice_endpoints))\
+            .options(selectinload(SliceModel.slice_services))\
+            .options(selectinload(SliceModel.slice_subslices))\
+            .options(selectinload(SliceModel.constraints))\
+            .options(selectinload(SliceModel.config_rules))\
+            .filter_by(slice_uuid=slice_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -240,3 +252,26 @@ def slice_delete(db_engine : Engine, request : SliceId) -> Tuple[Dict, bool]:
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_slice_id(slice_uuid, json_context_id(context_uuid)),deleted
+
+def slice_select(db_engine : Engine, request : SliceFilter) -> List[Dict]:
+    slice_uuids = [
+        slice_get_uuid(slice_id, allow_random=False)[1]
+        for slice_id in request.slice_ids.slice_ids
+    ]
+    dump_params = dict(
+        include_endpoint_ids=request.include_endpoint_ids,
+        include_constraints =request.include_constraints,
+        include_service_ids =request.include_service_ids,
+        include_subslice_ids=request.include_subslice_ids,
+        include_config_rules=request.include_config_rules,
+    )
+    def callback(session : Session) -> List[Dict]:
+        query = session.query(SliceModel)
+        if request.include_endpoint_ids: query = query.options(selectinload(SliceModel.slice_endpoints))
+        if request.include_service_ids : query = query.options(selectinload(SliceModel.slice_services))
+        if request.include_subslice_ids: query = query.options(selectinload(SliceModel.slice_subslices))
+        if request.include_constraints : query = query.options(selectinload(SliceModel.constraints))
+        if request.include_config_rules: query = query.options(selectinload(SliceModel.config_rules))
+        obj_list : List[SliceModel] = query.filter(SliceModel.slice_uuid.in_(slice_uuids)).all()
+        return [obj.dump(**dump_params) for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Topology.py b/src/context/service/database/Topology.py
index e2c6e2e996ac9321d0d8b9ae2ecea018b650632f..4440299b63f68613854e79998270872389d385cb 100644
--- a/src/context/service/database/Topology.py
+++ b/src/context/service/database/Topology.py
@@ -15,14 +15,16 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
 from common.proto.context_pb2 import ContextId, Topology, TopologyId
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology_id
-from .models.TopologyModel import TopologyModel
+from .models.DeviceModel import DeviceModel
+from .models.LinkModel import LinkModel
+from .models.TopologyModel import TopologyDeviceModel, TopologyLinkModel, TopologyModel
 from .uuids.Context import context_get_uuid
 from .uuids.Topology import topology_get_uuid
 
@@ -38,7 +40,10 @@ def topology_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
 def topology_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all()
+        obj_list : List[TopologyModel] = session.query(TopologyModel)\
+            .options(selectinload(TopologyModel.topology_devices))\
+            .options(selectinload(TopologyModel.topology_links))\
+            .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
@@ -46,6 +51,8 @@ def topology_get(db_engine : Engine, request : TopologyId) -> Dict:
     _,topology_uuid = topology_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[TopologyModel] = session.query(TopologyModel)\
+            .options(selectinload(TopologyModel.topology_devices))\
+            .options(selectinload(TopologyModel.topology_links))\
             .filter_by(topology_uuid=topology_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
@@ -62,7 +69,10 @@ def topology_get_details(db_engine : Engine, request : TopologyId) -> Dict:
     _,topology_uuid = topology_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[TopologyModel] = session.query(TopologyModel)\
+            .options(selectinload(TopologyModel.topology_devices, TopologyDeviceModel.device, DeviceModel.endpoints))\
+            .options(selectinload(TopologyModel.topology_links, TopologyLinkModel.link, LinkModel.link_endpoints))\
             .filter_by(topology_uuid=topology_uuid).one_or_none()
+            #.options(selectinload(DeviceModel.components))\
         return None if obj is None else obj.dump_details()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
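
`topology_get_details()` now pre-loads a three-hop path (topology to topology_devices to device to endpoints) so that `dump_details()` never triggers lazy loads. A self-contained sketch of the same idea on a toy three-level schema, written with the chained spelling of the same load path:

```python
# Multi-hop selectinload sketch; toy schema, not the TeraFlow models.
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, relationship, selectinload, sessionmaker

Base = declarative_base()

class Topology(Base):
    __tablename__ = 'topology'
    uuid    = Column(Integer, primary_key=True)
    devices = relationship('TopologyDevice')

class TopologyDevice(Base):
    __tablename__ = 'topology_device'
    topology_uuid = Column(ForeignKey('topology.uuid'), primary_key=True)
    device_uuid   = Column(ForeignKey('device.uuid'),   primary_key=True)
    device        = relationship('Device')

class Device(Base):
    __tablename__ = 'device'
    uuid      = Column(Integer, primary_key=True)
    endpoints = relationship('EndPoint')

class EndPoint(Base):
    __tablename__ = 'endpoint'
    uuid        = Column(Integer, primary_key=True)
    device_uuid = Column(ForeignKey('device.uuid'))
    name        = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with sessionmaker(bind=engine)() as session:
    device = Device(uuid=1, endpoints=[EndPoint(uuid=1, name='eth0')])
    session.add(Topology(uuid=1, devices=[TopologyDevice(device=device)]))
    session.commit()

    # After the base query, three SELECT ... IN (...) round-trips load the whole tree up front.
    topology = session.query(Topology)\
        .options(selectinload(Topology.devices).selectinload(TopologyDevice.device).selectinload(Device.endpoints))\
        .one()
    print([endpoint.name for td in topology.devices for endpoint in td.device.endpoints])
```
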
diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py
index 156e33c6bb32e237af241035f1d9672b0b419222..f71d4177893d146af2f413781b51930c9909d827 100644
--- a/src/context/service/database/models/ConnectionModel.py
+++ b/src/context/service/database/models/ConnectionModel.py
@@ -59,8 +59,8 @@ class ConnectionEndPointModel(_Base):
     endpoint_uuid   = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position        = Column(Integer, nullable=False)
 
-    connection = relationship('ConnectionModel', back_populates='connection_endpoints', lazy='joined')
-    endpoint   = relationship('EndPointModel',   lazy='joined') # back_populates='connection_endpoints'
+    connection = relationship('ConnectionModel', back_populates='connection_endpoints') #, lazy='joined'
+    endpoint   = relationship('EndPointModel',   lazy='selectin') # back_populates='connection_endpoints'
 
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
@@ -72,5 +72,5 @@ class ConnectionSubServiceModel(_Base):
     connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
     subservice_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
-    connection = relationship('ConnectionModel', back_populates='connection_subservices', lazy='joined')
-    subservice = relationship('ServiceModel',    lazy='joined') # back_populates='connection_subservices'
+    connection = relationship('ConnectionModel', back_populates='connection_subservices') #, lazy='joined'
+    subservice = relationship('ServiceModel',    lazy='selectin') # back_populates='connection_subservices'
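
In the association models the `endpoint`/`subservice` side keeps eager loading, but as a mapper-level `lazy='selectin'` strategy instead of `lazy='joined'`, presumably to avoid the wide joined result sets that joined eager loading produces when these rows are loaded in bulk. A small sketch of what the mapping-time strategy means for callers (toy classes, not the TeraFlow schema):

```python
# With lazy='selectin' declared on the relationship, every query returning ConnectionEndPoint rows
# also emits one SELECT ... IN (...) for .endpoint, without callers adding .options(selectinload(...)).
from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()

class EndPoint(Base):
    __tablename__ = 'endpoint'
    uuid = Column(Integer, primary_key=True)

class ConnectionEndPoint(Base):
    __tablename__ = 'connection_endpoint'
    uuid          = Column(Integer, primary_key=True)
    endpoint_uuid = Column(ForeignKey('endpoint.uuid'))
    endpoint      = relationship('EndPoint', lazy='selectin')   # loader strategy fixed at mapping time

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with sessionmaker(bind=engine)() as session:
    session.add(ConnectionEndPoint(uuid=1, endpoint=EndPoint(uuid=7)))
    session.commit()
    rows = session.query(ConnectionEndPoint).all()    # base SELECT + automatic SELECT ... IN for endpoints
    print([row.endpoint.uuid for row in rows])        # no extra query per row here
```
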
diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py
index 2124386d16e2e33aec58f5b39bf0f89e3c6589f1..24130841d2bafde3608f2fa1cbdd476d28acba46 100644
--- a/src/context/service/database/models/DeviceModel.py
+++ b/src/context/service/database/models/DeviceModel.py
@@ -16,7 +16,7 @@ import operator
 from sqlalchemy import Column, DateTime, Enum, String
 from sqlalchemy.dialects.postgresql import ARRAY, UUID
 from sqlalchemy.orm import relationship
-from typing import Dict
+from typing import Dict, List
 from .enums.DeviceDriver import ORM_DeviceDriverEnum
 from .enums.DeviceOperationalStatus import ORM_DeviceOperationalStatusEnum
 from ._Base import _Base
@@ -39,19 +39,29 @@ class DeviceModel(_Base):
     def dump_id(self) -> Dict:
         return {'device_uuid': {'uuid': self.device_uuid}}
 
-    def dump(self) -> Dict:
-        return {
+    def dump_endpoints(self) -> List[Dict]:
+        return [endpoint.dump() for endpoint in self.endpoints]
+
+    def dump_config_rules(self) -> Dict:
+        return {'config_rules': [
+            config_rule.dump()
+            for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+        ]}
+
+    #def dump_components(self) -> List[Dict]:
+    #    return []
+
+    def dump(self,
+        include_endpoints : bool = True, include_config_rules : bool = True, include_components : bool = True,
+    ) -> Dict:
+        result = {
             'device_id'                : self.dump_id(),
             'name'                     : self.device_name,
             'device_type'              : self.device_type,
             'device_operational_status': self.device_operational_status.value,
             'device_drivers'           : [driver.value for driver in self.device_drivers],
-            'device_config'            : {'config_rules': [
-                config_rule.dump()
-                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
-            ]},
-            'device_endpoints'         : [
-                endpoint.dump()
-                for endpoint in self.endpoints
-            ],
         }
+        if include_endpoints: result['device_endpoints'] = self.dump_endpoints()
+        if include_config_rules: result['device_config'] = self.dump_config_rules()
+        #if include_components: result['components'] = self.dump_components()
+        return result
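
The `dump()` refactor splits the monolithic dictionary into per-relation helpers plus `include_*` switches, so callers such as `device_select()` and `TopologyModel.dump_details()` can skip expensive parts of the payload. A standalone, plain-Python illustration of the resulting shape (hypothetical `DeviceRecord` class, not the ORM model):

```python
# Include-flag dump pattern in isolation (plain Python, no ORM).
from typing import Dict, List

class DeviceRecord:
    def __init__(self, uuid : str, endpoints : List[str], config_rules : List[Dict]):
        self.uuid, self.endpoints, self.config_rules = uuid, endpoints, config_rules

    def dump(self, include_endpoints : bool = True, include_config_rules : bool = True) -> Dict:
        result = {'device_id': {'device_uuid': {'uuid': self.uuid}}}
        if include_endpoints   : result['device_endpoints'] = list(self.endpoints)
        if include_config_rules: result['device_config'] = {'config_rules': list(self.config_rules)}
        return result

record = DeviceRecord('dev-1', ['eth0'], [{'action': 'SET'}])
print(record.dump(include_config_rules=False))
# {'device_id': {'device_uuid': {'uuid': 'dev-1'}}, 'device_endpoints': ['eth0']}
```
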
diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py
index 12ba7e10e7c3d5789f9bf16ad7b4f50c35a36bf5..a079f9900e39fdf3a4329e604f4e596e7f5d1f89 100644
--- a/src/context/service/database/models/EndPointModel.py
+++ b/src/context/service/database/models/EndPointModel.py
@@ -31,8 +31,8 @@ class EndPointModel(_Base):
     created_at       = Column(DateTime, nullable=False)
     updated_at       = Column(DateTime, nullable=False)
 
-    device            = relationship('DeviceModel',          back_populates='endpoints')
-    topology          = relationship('TopologyModel')
+    device            = relationship('DeviceModel',          back_populates='endpoints') # lazy='selectin'
+    topology          = relationship('TopologyModel', lazy='selectin')
     #link_endpoints    = relationship('LinkEndPointModel',    back_populates='endpoint' )
     #service_endpoints = relationship('ServiceEndPointModel', back_populates='endpoint' )
 
diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py
index e9fd9bc8742222e0934a76b6e0ffa4acb1b71f40..9c16da3c9146f28352e8b4f7a6f9ab85f870c8b7 100644
--- a/src/context/service/database/models/LinkModel.py
+++ b/src/context/service/database/models/LinkModel.py
@@ -50,8 +50,8 @@ class LinkEndPointModel(_Base):
     endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position      = Column(Integer, nullable=False)
 
-    link     = relationship('LinkModel',     back_populates='link_endpoints', lazy='joined')
-    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints'
+    link     = relationship('LinkModel',     back_populates='link_endpoints') #, lazy='selectin'
+    endpoint = relationship('EndPointModel', lazy='selectin') # back_populates='link_endpoints'
 
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
diff --git a/src/context/service/database/models/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py
index 663a9a39a30903b3dd41ccfee56da19528325af0..32364e289cf68fe760c60eb27cde933f7cf448a4 100644
--- a/src/context/service/database/models/PolicyRuleModel.py
+++ b/src/context/service/database/models/PolicyRuleModel.py
@@ -74,4 +74,4 @@ class PolicyRuleDeviceModel(_Base):
     device_uuid     = Column(ForeignKey('device.device_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     #policyrule = relationship('PolicyRuleModel', lazy='joined') # back_populates='policyrule_devices'
-    device     = relationship('DeviceModel',     lazy='joined') # back_populates='policyrule_devices'
+    device     = relationship('DeviceModel',     lazy='selectin') # back_populates='policyrule_devices'
diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py
index f1781c4f86fa25e8d9f3e42da46451e112ef779e..ef6e1b06aaaa616ede6f9633e4e0d7fc0aabf336 100644
--- a/src/context/service/database/models/ServiceModel.py
+++ b/src/context/service/database/models/ServiceModel.py
@@ -16,7 +16,7 @@ import operator
 from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String
 from sqlalchemy.dialects.postgresql import UUID
 from sqlalchemy.orm import relationship
-from typing import Dict
+from typing import Dict, List
 from .enums.ServiceStatus import ORM_ServiceStatusEnum
 from .enums.ServiceType import ORM_ServiceTypeEnum
 from ._Base import _Base
@@ -32,10 +32,10 @@ class ServiceModel(_Base):
     created_at     = Column(DateTime, nullable=False)
     updated_at     = Column(DateTime, nullable=False)
 
-    context           = relationship('ContextModel', back_populates='services')
-    service_endpoints = relationship('ServiceEndPointModel') # lazy='joined', back_populates='service'
-    constraints       = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='service'
-    config_rules      = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='service'
+    context           = relationship('ContextModel', back_populates='services', lazy='selectin')
+    service_endpoints = relationship('ServiceEndPointModel') # lazy='selectin', back_populates='service'
+    constraints       = relationship('ConstraintModel', passive_deletes=True) # lazy='selectin', back_populates='service'
+    config_rules      = relationship('ConfigRuleModel', passive_deletes=True) # lazy='selectin', back_populates='service'
 
     def dump_id(self) -> Dict:
         return {
@@ -43,25 +43,37 @@ class ServiceModel(_Base):
             'service_uuid': {'uuid': self.service_uuid},
         }
 
-    def dump(self) -> Dict:
-        return {
-            'service_id'          : self.dump_id(),
-            'name'                : self.service_name,
-            'service_type'        : self.service_type.value,
-            'service_status'      : {'service_status': self.service_status.value},
-            'service_endpoint_ids': [
-                service_endpoint.endpoint.dump_id()
-                for service_endpoint in sorted(self.service_endpoints, key=operator.attrgetter('position'))
-            ],
-            'service_constraints' : [
-                constraint.dump()
-                for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
-            ],
-            'service_config'      : {'config_rules': [
-                config_rule.dump()
-                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
-            ]},
+    def dump_endpoint_ids(self) -> List[Dict]:
+        return [
+            service_endpoint.endpoint.dump_id()
+            for service_endpoint in sorted(self.service_endpoints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_constraints(self) -> List[Dict]:
+        return [
+            constraint.dump()
+            for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_config_rules(self) -> Dict:
+        return {'config_rules': [
+            config_rule.dump()
+            for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+        ]}
+
+    def dump(
+        self, include_endpoint_ids : bool = True, include_constraints : bool = True, include_config_rules : bool = True
+    ) -> Dict:
+        result = {
+            'service_id'    : self.dump_id(),
+            'name'          : self.service_name,
+            'service_type'  : self.service_type.value,
+            'service_status': {'service_status': self.service_status.value},
         }
+        if include_endpoint_ids: result['service_endpoint_ids'] = self.dump_endpoint_ids()
+        if include_constraints: result['service_constraints'] = self.dump_constraints()
+        if include_config_rules: result['service_config'] = self.dump_config_rules()
+        return result
 
 class ServiceEndPointModel(_Base):
     __tablename__ = 'service_endpoint'
@@ -70,8 +82,8 @@ class ServiceEndPointModel(_Base):
     endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position      = Column(Integer, nullable=False)
 
-    service  = relationship('ServiceModel',  back_populates='service_endpoints', lazy='joined')
-    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints'
+    service  = relationship('ServiceModel',  back_populates='service_endpoints') # lazy='selectin'
+    endpoint = relationship('EndPointModel', lazy='selectin') # back_populates='service_endpoints'
 
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py
index 7f1550eb2ebb80962bac94374112d43785184374..423af244e186301cf3132eea3fc7cbea16bf9fe9 100644
--- a/src/context/service/database/models/SliceModel.py
+++ b/src/context/service/database/models/SliceModel.py
@@ -16,7 +16,7 @@ import operator
 from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String
 from sqlalchemy.dialects.postgresql import UUID
 from sqlalchemy.orm import relationship
-from typing import Dict
+from typing import Dict, List
 from .enums.SliceStatus import ORM_SliceStatusEnum
 from ._Base import _Base
 
@@ -32,13 +32,13 @@ class SliceModel(_Base):
     created_at         = Column(DateTime, nullable=False)
     updated_at         = Column(DateTime, nullable=False)
 
-    context         = relationship('ContextModel', back_populates='slices')
-    slice_endpoints = relationship('SliceEndPointModel') # lazy='joined', back_populates='slice'
-    slice_services  = relationship('SliceServiceModel') # lazy='joined', back_populates='slice'
+    context         = relationship('ContextModel', back_populates='slices', lazy='selectin')
+    slice_endpoints = relationship('SliceEndPointModel') # lazy='selectin', back_populates='slice'
+    slice_services  = relationship('SliceServiceModel') # lazy='selectin', back_populates='slice'
     slice_subslices = relationship(
         'SliceSubSliceModel', primaryjoin='slice.c.slice_uuid == slice_subslice.c.slice_uuid')
-    constraints     = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='slice'
-    config_rules    = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='slice'
+    constraints     = relationship('ConstraintModel', passive_deletes=True) # lazy='selectin', back_populates='slice'
+    config_rules    = relationship('ConfigRuleModel', passive_deletes=True) # lazy='selectin', back_populates='slice'
 
     def dump_id(self) -> Dict:
         return {
@@ -46,36 +46,59 @@ class SliceModel(_Base):
             'slice_uuid': {'uuid': self.slice_uuid},
         }
 
-    def dump(self) -> Dict:
+
+    def dump_endpoint_ids(self) -> List[Dict]:
+        return [
+            slice_endpoint.endpoint.dump_id()
+            for slice_endpoint in sorted(self.slice_endpoints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_constraints(self) -> List[Dict]:
+        return [
+            constraint.dump()
+            for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_config_rules(self) -> Dict:
+        return {'config_rules': [
+            config_rule.dump()
+            for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+        ]}
+
+    def dump_service_ids(self) -> List[Dict]:
+        return [
+            slice_service.service.dump_id()
+            for slice_service in self.slice_services
+        ]
+
+    def dump_subslice_ids(self) -> List[Dict]:
+        return [
+            slice_subslice.subslice.dump_id()
+            for slice_subslice in self.slice_subslices
+        ]
+
+    def dump_owner_id(self) -> Dict:
         return {
-            'slice_id'          : self.dump_id(),
-            'name'              : self.slice_name,
-            'slice_status'      : {'slice_status': self.slice_status.value},
-            'slice_endpoint_ids': [
-                slice_endpoint.endpoint.dump_id()
-                for slice_endpoint in sorted(self.slice_endpoints, key=operator.attrgetter('position'))
-            ],
-            'slice_constraints' : [
-                constraint.dump()
-                for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
-            ],
-            'slice_config'      : {'config_rules': [
-                config_rule.dump()
-                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
-            ]},
-            'slice_service_ids': [
-                slice_service.service.dump_id()
-                for slice_service in self.slice_services
-            ],
-            'slice_subslice_ids': [
-                slice_subslice.subslice.dump_id()
-                for slice_subslice in self.slice_subslices
-            ],
-            'slice_owner': {
-                'owner_uuid': {'uuid': self.slice_owner_uuid},
-                'owner_string': self.slice_owner_string
-            }
+            'owner_uuid': {'uuid': self.slice_owner_uuid},
+            'owner_string': self.slice_owner_string
+        }
+
+    def dump(
+        self, include_endpoint_ids : bool = True, include_constraints : bool = True, include_service_ids : bool = True,
+        include_subslice_ids : bool = True, include_config_rules : bool = True
+    ) -> Dict:
+        result = {
+            'slice_id'    : self.dump_id(),
+            'name'        : self.slice_name,
+            'slice_status': {'slice_status': self.slice_status.value},
+            'slice_owner' : self.dump_owner_id()
         }
+        if include_endpoint_ids: result['slice_endpoint_ids'] = self.dump_endpoint_ids()
+        if include_constraints : result['slice_constraints' ] = self.dump_constraints()
+        if include_service_ids : result['slice_service_ids' ] = self.dump_service_ids()
+        if include_subslice_ids: result['slice_subslice_ids'] = self.dump_subslice_ids()
+        if include_config_rules: result['slice_config'      ] = self.dump_config_rules()
+        return result
 
 class SliceEndPointModel(_Base):
     __tablename__ = 'slice_endpoint'
@@ -84,8 +107,8 @@ class SliceEndPointModel(_Base):
     endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position      = Column(Integer, nullable=False)
 
-    slice    = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined')
-    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints'
+    slice    = relationship('SliceModel', back_populates='slice_endpoints') #, lazy='selectin'
+    endpoint = relationship('EndPointModel', lazy='selectin') # back_populates='slice_endpoints'
 
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
@@ -97,8 +120,8 @@ class SliceServiceModel(_Base):
     slice_uuid   = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE' ), primary_key=True)
     service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
-    slice   = relationship('SliceModel', back_populates='slice_services', lazy='joined')
-    service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services'
+    slice   = relationship('SliceModel', back_populates='slice_services') # , lazy='selectin'
+    service = relationship('ServiceModel', lazy='selectin') # back_populates='slice_services'
 
 class SliceSubSliceModel(_Base):
     __tablename__ = 'slice_subslice'
@@ -107,5 +130,5 @@ class SliceSubSliceModel(_Base):
     subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True)
 
     slice    = relationship(
-        'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined')
-    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='joined')
+        'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices') #, lazy='selectin'
+    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='selectin')
diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py
index 7dc2333f0a9b979f251c173d850a235dcb822d91..0ed4a038bcf4426f4cf112bd03c5cb36cb42c822 100644
--- a/src/context/service/database/models/TopologyModel.py
+++ b/src/context/service/database/models/TopologyModel.py
@@ -27,7 +27,7 @@ class TopologyModel(_Base):
     created_at    = Column(DateTime, nullable=False)
     updated_at    = Column(DateTime, nullable=False)
 
-    context          = relationship('ContextModel', back_populates='topologies')
+    context          = relationship('ContextModel', back_populates='topologies', lazy='selectin')
     topology_devices = relationship('TopologyDeviceModel') # back_populates='topology'
     topology_links   = relationship('TopologyLinkModel'  ) # back_populates='topology'
 
@@ -46,11 +46,19 @@ class TopologyModel(_Base):
         }
 
     def dump_details(self) -> Dict:
+        devices = [
+            td.device.dump(include_config_rules=False, include_components=False)
+            for td in self.topology_devices
+        ]
+        links = [
+            tl.link.dump()
+            for tl in self.topology_links
+        ]
         return {
             'topology_id': self.dump_id(),
             'name'       : self.topology_name,
-            'devices'    : [td.device.dump() for td in self.topology_devices],
-            'links'      : [tl.link.dump()   for tl in self.topology_links  ],
+            'devices'    : devices,
+            'links'      : links,
         }
 
 class TopologyDeviceModel(_Base):
@@ -59,8 +67,8 @@ class TopologyDeviceModel(_Base):
     topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True, index=True)
 
-    #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices'
-    device   = relationship('DeviceModel',   lazy='joined') # back_populates='topology_devices'
+    #topology = relationship('TopologyModel', lazy='selectin') # back_populates='topology_devices'
+    device   = relationship('DeviceModel',   lazy='selectin') # back_populates='topology_devices'
 
 class TopologyLinkModel(_Base):
     __tablename__ = 'topology_link'
@@ -68,5 +76,5 @@ class TopologyLinkModel(_Base):
     topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True, index=True)
 
-    #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links'
-    link     = relationship('LinkModel',     lazy='joined') # back_populates='topology_links'
+    #topology = relationship('TopologyModel', lazy='selectin') # back_populates='topology_links'
+    link     = relationship('LinkModel',     lazy='selectin') # back_populates='topology_links'
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
index 2b08b6c7e03cfd50557f25f99ffea3032dbb811e..205d769acb76992aeba33fc54b7e7b8fbbdc8d06 100644
--- a/src/device/service/DeviceServiceServicerImpl.py
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -160,6 +160,12 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
                 for error in errors: LOGGER.error(error)
                 raise OperationFailedException('ConfigureDevice', extra_details=errors)
 
+            # Context Performance+Scalability enhancement:
+            # Except for the P4-specific logic, this method does not add/update/delete endpoints.
+            # Remove them from the request to reduce the number of inserts performed by Context.
+            # TODO: inspect the endpoints and keep only those that changed with respect to Context.
+            del device.device_endpoints[:]
+
             # Note: Rules are updated by configure_rules() and deconfigure_rules() methods.
             device_id = context_client.SetDevice(device)
             return device_id
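
Dropping the unchanged endpoints before calling `SetDevice()` shrinks the message that Context has to upsert. A small hedged sketch of the same step as a standalone, non-mutating helper, assuming the TeraFlow `Device` proto is available (the in-place `del device.device_endpoints[:]` above is the non-copying equivalent):

```python
# Sketch only; assumes the TeraFlow proto stubs are on the path.
from common.proto.context_pb2 import Device

def strip_endpoints(device : Device) -> Device:
    reduced = Device()
    reduced.CopyFrom(device)                 # keep the caller's message untouched
    del reduced.device_endpoints[:]          # same effect as reduced.ClearField('device_endpoints')
    return reduced
```
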
diff --git a/src/monitoring/service/MetricsDBTools.py b/src/monitoring/service/MetricsDBTools.py
index 6b98255411aa88ac18bd01474830b3bf268d3483..f928f07b94c71fb6f378161862e96d41af8bde7f 100644
--- a/src/monitoring/service/MetricsDBTools.py
+++ b/src/monitoring/service/MetricsDBTools.py
@@ -264,68 +264,65 @@ class MetricsDB():
                 for kpi in kpi_list:
                     alarm = False
                     kpi_value = kpi[2]
+                    kpiMinIsNone = ((kpiMinValue is None) or math.isnan(kpiMinValue))
+                    kpiMaxIsNone = ((kpiMaxValue is None) or math.isnan(kpiMaxValue))
                     if (kpiMinValue == kpi_value and kpiMaxValue == kpi_value and inRange):
                         alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and includeMaxValue):
                         if (kpi_value >= kpiMinValue and kpi_value <= kpiMaxValue):
                             alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and not includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and not includeMaxValue):
                         if (kpi_value >= kpiMinValue and kpi_value < kpiMaxValue):
                             alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and includeMaxValue):
                         if (kpi_value > kpiMinValue and kpi_value <= kpiMaxValue):
                             alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and not includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and not includeMaxValue):
                         if (kpi_value > kpiMinValue and kpi_value < kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and includeMaxValue):
                         if (kpi_value <= kpiMinValue or kpi_value >= kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and not includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and not includeMaxValue):
                         if (kpi_value <= kpiMinValue or kpi_value > kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and includeMaxValue):
                         if (kpi_value < kpiMinValue or kpi_value >= kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and not includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and not includeMaxValue):
                         if (kpi_value < kpiMinValue or kpi_value > kpiMaxValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is not None and kpiMaxValue is None and includeMinValue):
+                    elif (inRange and not kpiMinIsNone and kpiMaxIsNone and includeMinValue):
                         if (kpi_value >= kpiMinValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
+                    elif (inRange and not kpiMinIsNone and kpiMaxIsNone and not includeMinValue):
                         if (kpi_value > kpiMinValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
+                    elif (not inRange and not kpiMinIsNone and kpiMaxIsNone and includeMinValue):
                         if (kpi_value <= kpiMinValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
-                        if (kpi_value <= kpiMinValue):
+                    elif (not inRange and not kpiMinIsNone and kpiMaxIsNone and not includeMinValue):
+                        if (kpi_value < kpiMinValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is None and kpiMaxValue is not None and includeMaxValue):
+                    elif (inRange and kpiMinIsNone and not kpiMaxIsNone and includeMaxValue):
                         if (kpi_value <= kpiMaxValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
+                    elif (inRange and kpiMinIsNone and not kpiMaxIsNone and not includeMaxValue):
                         if (kpi_value < kpiMaxValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
+                    elif (not inRange and kpiMinIsNone and not kpiMaxIsNone and includeMaxValue):
                         if (kpi_value >= kpiMaxValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
-                        if (kpi_value >= kpiMaxValue):
+                    elif (not inRange and kpiMinIsNone and not kpiMaxIsNone and not includeMaxValue):
+                        if (kpi_value > kpiMaxValue):
                             alarm = True
                     if alarm:
                         valid_kpi_list.append(kpi)
-                alarm_queue.put_nowait(valid_kpi_list)
-                LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> kpi_value:{kpi[2]}, timestamp:{kpi[1]}")
+                if valid_kpi_list:
+                    alarm_queue.put_nowait(valid_kpi_list)
+                    LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> kpi_value:{kpi[2]}, timestamp:{kpi[1]}")
+                else:
+                    LOGGER.debug(f"No new alarms triggered for the alarm of KPI {kpi_id}")
             else:
                 LOGGER.debug(f"No new data for the alarm of KPI {kpi_id}")
         except (Exception) as e:
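
The rewritten ladder treats a bound that is `None` or `NaN` as "not set" (via `kpiMinIsNone`/`kpiMaxIsNone`) and fixes the previously duplicated exclusive-bound branches. As a reading aid only (not part of the patch), a compact helper that is intended to be equivalent to the branches above:

```python
import math
from typing import Optional

def _unset(bound : Optional[float]) -> bool:
    return (bound is None) or math.isnan(bound)

def evaluate_alarm(
    value : float, min_value : Optional[float], max_value : Optional[float],
    in_range : bool, include_min : bool, include_max : bool,
) -> bool:
    has_min, has_max = not _unset(min_value), not _unset(max_value)
    if in_range and min_value == value and max_value == value:
        return True                                   # special case kept from the original ladder
    if not has_min and not has_max:
        return False                                  # no bounds configured -> nothing to check
    if in_range:
        ok_min = (value >= min_value if include_min else value > min_value) if has_min else True
        ok_max = (value <= max_value if include_max else value < max_value) if has_max else True
        return ok_min and ok_max
    below = (value <= min_value if include_min else value < min_value) if has_min else False
    above = (value >= max_value if include_max else value > max_value) if has_max else False
    return below or above
```
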
diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py
index f408734df40c1bc5c16b7e108e3ce5a211165f71..62adcf465d49de782e17ee587d6ce67724d44b38 100644
--- a/src/monitoring/service/MonitoringServiceServicerImpl.py
+++ b/src/monitoring/service/MonitoringServiceServicerImpl.py
@@ -407,8 +407,8 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
             alarm_description = request.alarm_description
             alarm_name = request.name
             kpi_id = request.kpi_id.kpi_id.uuid
-            kpi_min_value = request.kpi_value_range.kpiMinValue.floatVal
-            kpi_max_value = request.kpi_value_range.kpiMaxValue.floatVal
+            kpi_min_value = float(request.kpi_value_range.kpiMinValue.floatVal)
+            kpi_max_value = float(request.kpi_value_range.kpiMaxValue.floatVal)
             in_range = request.kpi_value_range.inRange
             include_min_value = request.kpi_value_range.includeMinValue
             include_max_value = request.kpi_value_range.includeMaxValue
diff --git a/src/pathcomp/frontend/Config.py b/src/pathcomp/frontend/Config.py
index f17a9f5377b5abcbd9001d1d3773e26998cb3211..714eb7278074ac860caa76dc3ed8b4a40ae9f192 100644
--- a/src/pathcomp/frontend/Config.py
+++ b/src/pathcomp/frontend/Config.py
@@ -26,8 +26,9 @@ PATHCOMP_BACKEND_BASEURL = str(os.environ.get('PATHCOMP_BACKEND_BASEURL', DEFAUL
 # - first check env vars PATHCOMP_BACKEND_HOST & PATHCOMP_BACKEND_PORT
 # - if not set, check env vars PATHCOMPSERVICE_SERVICE_HOST & PATHCOMPSERVICE_SERVICE_PORT_HTTP
 # - if not set, use DEFAULT_PATHCOMP_BACKEND_HOST & DEFAULT_PATHCOMP_BACKEND_PORT
+
 backend_host = DEFAULT_PATHCOMP_BACKEND_HOST
-backend_host = os.environ.get('PATHCOMPSERVICE_SERVICE_HOST', backend_host)
+#backend_host = os.environ.get('PATHCOMPSERVICE_SERVICE_HOST', backend_host)
 PATHCOMP_BACKEND_HOST = str(os.environ.get('PATHCOMP_BACKEND_HOST', backend_host))
 
 backend_port = DEFAULT_PATHCOMP_BACKEND_PORT
diff --git a/src/pathcomp/frontend/Dockerfile b/src/pathcomp/frontend/Dockerfile
index 352de75f31366b65e62e2f6357d1bd5f28bd2b0f..9384b3e19edd5e82b0efcb9706c41105a31321e3 100644
--- a/src/pathcomp/frontend/Dockerfile
+++ b/src/pathcomp/frontend/Dockerfile
@@ -62,8 +62,14 @@ RUN python3 -m pip install -r requirements.txt
 
 # Add component files into working directory
 WORKDIR /var/teraflow
-COPY src/context/. context/
-COPY src/device/. device/
+COPY src/context/__init__.py context/__init__.py
+COPY src/context/client/. context/client/
+COPY src/device/__init__.py device/__init__.py
+COPY src/device/client/. device/client/
+COPY src/service/__init__.py service/__init__.py
+COPY src/service/client/. service/client/
+COPY src/slice/__init__.py slice/__init__.py
+COPY src/slice/client/. slice/client/
 COPY src/pathcomp/. pathcomp/
 
 # Start the service
diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
index 6fc33dbd45a92405fb2fa115e12cb460a9111d54..52f1cd3d584e14ca5dee1bc5e0511e014bdc8e73 100644
--- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
+++ b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
@@ -13,9 +13,9 @@
 # limitations under the License.
 
 import grpc, logging, threading
-from common.Constants import DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
-from common.proto.context_pb2 import ContextId, Empty
+from common.proto.context_pb2 import ContextId, Empty, TopologyId
 from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
 from common.proto.pathcomp_pb2_grpc import PathCompServiceServicer
 from common.tools.context_queries.Device import get_devices_in_topology
@@ -23,6 +23,7 @@ from common.tools.context_queries.Link import get_links_in_topology
 from common.tools.context_queries.InterDomain import is_inter_domain
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from pathcomp.frontend.service.algorithms.Factory import get_algorithm
 
@@ -30,7 +31,7 @@ LOGGER = logging.getLogger(__name__)
 
 METRICS_POOL = MetricsPool('PathComp', 'RPC')
 
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+#ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 class PathCompServiceServicerImpl(PathCompServiceServicer):
     def __init__(self) -> None:
@@ -44,18 +45,23 @@ class PathCompServiceServicerImpl(PathCompServiceServicer):
 
         context_client = ContextClient()
 
+        context_id = json_context_id(DEFAULT_CONTEXT_NAME)
         if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids):
-            devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
-            links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
+            #devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
+            #links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
+            topology_id = json_topology_id(INTERDOMAIN_TOPOLOGY_NAME, context_id)
         else:
             # TODO: improve filtering of devices and links
             # TODO: add contexts, topologies, and membership of devices/links in topologies
-            devices = context_client.ListDevices(Empty())
-            links = context_client.ListLinks(Empty())
+            #devices = context_client.ListDevices(Empty())
+            #links = context_client.ListLinks(Empty())
+            topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id)
+
+        topology_details = context_client.GetTopologyDetails(TopologyId(**topology_id))
 
         algorithm = get_algorithm(request)
-        algorithm.add_devices(devices)
-        algorithm.add_links(links)
+        algorithm.add_devices(topology_details.devices)
+        algorithm.add_links(topology_details.links)
         algorithm.add_service_requests(request)
 
         #LOGGER.debug('device_list = {:s}'  .format(str(algorithm.device_list  )))
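
With this change the servicer no longer lists every device and link in the system; it resolves one topology (the inter-domain one for multi-domain requests, the default one otherwise) and fetches its devices and links in a single GetTopologyDetails call. A condensed sketch of that selection logic follows, using only the imports visible in the hunk; the standalone function and its boolean parameter are illustrative, not the actual servicer method.

# Condensed sketch of the topology-selection logic introduced above.
from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
from common.proto.context_pb2 import TopologyId
from common.tools.object_factory.Context import json_context_id
from common.tools.object_factory.Topology import json_topology_id

def select_topology_details(context_client, is_inter_domain_request : bool):
    context_id = json_context_id(DEFAULT_CONTEXT_NAME)
    topology_name = INTERDOMAIN_TOPOLOGY_NAME if is_inter_domain_request else DEFAULT_TOPOLOGY_NAME
    topology_id = json_topology_id(topology_name, context_id)
    # One RPC returns the topology together with its devices and links.
    return context_client.GetTopologyDetails(TopologyId(**topology_id))
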
diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
index a6d39ee36949e075323613fceb71da5c77354fe5..144246620e85dd1aaf507efe75e22b62ce942587 100644
--- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
@@ -14,12 +14,10 @@
 
 import operator
 from typing import Dict, List, Optional, Set, Tuple
-from common.proto.context_pb2 import Connection, Link, Service
-from common.proto.pathcomp_pb2 import Algorithm_KDisjointPath, Algorithm_KShortestPath, PathCompReply, PathCompRequest
+from common.proto.context_pb2 import Link
+from common.proto.pathcomp_pb2 import Algorithm_KDisjointPath, Algorithm_KShortestPath, PathCompRequest
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from pathcomp.frontend.service.algorithms.tools.ComputeSubServices import convert_explicit_path_hops_to_connections
-from pathcomp.frontend.service.algorithms.tools.EroPathToHops import eropath_to_hops
-from ._Algorithm import _Algorithm
+from ._Algorithm import _Algorithm, SRC_END
 from .KShortestPathAlgorithm import KShortestPathAlgorithm
 
 Service_Id          = Tuple[str, str]   # (context_uuid, service_uuid)
@@ -100,7 +98,7 @@ class KDisjointPathAlgorithm(_Algorithm):
     def get_link_from_endpoint(self, endpoint : Dict) -> Tuple[Dict, Link]:
         device_uuid = endpoint['device_id']
         endpoint_uuid = endpoint['endpoint_uuid']
-        item = self.endpoint_to_link_dict.get((device_uuid, endpoint_uuid))
+        item = self.endpoint_to_link_dict.get((device_uuid, endpoint_uuid, SRC_END))
         if item is None:
             MSG = 'Link for Endpoint({:s}, {:s}) not found'
             self.logger.warning(MSG.format(device_uuid, endpoint_uuid))
@@ -141,7 +139,7 @@ class KDisjointPathAlgorithm(_Algorithm):
 
         Path = List[Dict]
         Path_NoPath = Optional[Path] # None = no path, list = path
-        self.json_reply : Dict[Tuple[str, str], List[Path_NoPath]] = dict()
+        service_to_paths : Dict[Tuple[str, str], List[Path_NoPath]] = dict()
 
         for num_path in range(self.num_disjoint):
             algorithm.service_list = list()
@@ -189,66 +187,25 @@ class KDisjointPathAlgorithm(_Algorithm):
             for response in response_list:
                 service_id = response['serviceId']
                 service_key = (service_id['contextId'], service_id['service_uuid'])
-                json_reply_service = self.json_reply.setdefault(service_key, list())
+                json_reply_service = service_to_paths.setdefault(service_key, list())
 
                 no_path_issue = response.get('noPath', {}).get('issue')
-                if no_path_issue is not None:
-                    json_reply_service.append(None)
-                    continue
+                if no_path_issue is not None: continue
 
-                path_endpoints = response['path'][0]['devices']
+                path_endpoints = response['path'][0]
                 json_reply_service.append(path_endpoints)
-                algorithm.link_list = self.remove_traversed_links(algorithm.link_list, path_endpoints)
+                algorithm.link_list = self.remove_traversed_links(algorithm.link_list, path_endpoints['devices'])
+
+        self.json_reply = dict()
+        response_list = self.json_reply.setdefault('response-list', [])
+        for service_key,paths in service_to_paths.items():
+            response = {'serviceId': {
+                'contextId': service_key[0],
+                'service_uuid': service_key[1],
+            }}
+            response['path'] = paths
+            if len(paths) < self.num_disjoint:
+                response['noPath'] = {'issue': 1}
+            response_list.append(response)
 
         self.logger.debug('self.json_reply = {:s}'.format(str(self.json_reply)))
-
-    def get_reply(self) -> PathCompReply:
-        reply = PathCompReply()
-        grpc_services : Dict[Tuple[str, str], Service] = {}
-        grpc_connections : Dict[Tuple[int, str], Connection] = {}
-        for service_key,paths in self.json_reply.items():
-            context_uuid, service_uuid = service_key
-
-            grpc_services[service_key] = self.add_service_to_reply(reply, context_uuid, service_uuid)
-
-            for num_path,service_path_ero in enumerate(paths):
-                self.logger.warning('num_path={:d}'.format(num_path))
-                self.logger.warning('service_path_ero={:s}'.format(str(service_path_ero)))
-                if service_path_ero is None: continue
-                path_hops = eropath_to_hops(service_path_ero, self.endpoint_to_link_dict)
-                self.logger.warning('path_hops={:s}'.format(str(path_hops)))
-                connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid)
-                self.logger.warning('connections={:s}'.format(str(connections)))
-
-                for connection in connections:
-                    connection_uuid,device_layer,path_hops,_ = connection
-
-                    service_key = (context_uuid, connection_uuid)
-                    grpc_service = grpc_services.get(service_key)
-                    if grpc_service is not None: continue
-                    grpc_service = self.add_service_to_reply(
-                        reply, context_uuid, connection_uuid, device_layer=device_layer, path_hops=path_hops)
-                    grpc_services[service_key] = grpc_service
-
-                for connection in connections:
-                    connection_uuid,device_layer,path_hops,dependencies = connection
-
-                    service_key = (context_uuid, connection_uuid)
-                    grpc_service = grpc_services.get(service_key)
-                    if grpc_service is None: raise Exception('Service({:s}) not found'.format(str(service_key)))
-
-                    connection_uuid = '{:s}:{:d}'.format(connection_uuid, num_path)
-                    grpc_connection = grpc_connections.get(connection_uuid)
-                    if grpc_connection is not None: continue
-                    grpc_connection = self.add_connection_to_reply(reply, connection_uuid, grpc_service, path_hops)
-                    grpc_connections[connection_uuid] = grpc_connection
-
-                    for sub_service_uuid in dependencies:
-                        sub_service_key = (context_uuid, sub_service_uuid)
-                        grpc_sub_service = grpc_services.get(sub_service_key)
-                        if grpc_sub_service is None:
-                            raise Exception('Service({:s}) not found'.format(str(sub_service_key)))
-                        grpc_sub_service_id = grpc_connection.sub_service_ids.add()
-                        grpc_sub_service_id.CopyFrom(grpc_sub_service.service_id)
-
-        return reply
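
After this change the K-disjoint-path algorithm no longer builds the gRPC reply itself (the removed get_reply above); it stores a backend-style reply in self.json_reply so the reply construction can be handled by the inherited _Algorithm logic. A small sketch of the new reply shape, using only the keys that appear in the hunk; the sample data is made up.

# Sketch of the 'response-list' structure assembled above.
from typing import Dict, List, Optional, Tuple

def build_json_reply(service_to_paths : Dict[Tuple[str, str], List[Optional[dict]]], num_disjoint : int) -> dict:
    json_reply = {'response-list': []}
    for (context_uuid, service_uuid), paths in service_to_paths.items():
        response = {'serviceId': {'contextId': context_uuid, 'service_uuid': service_uuid}}
        response['path'] = paths
        if len(paths) < num_disjoint:
            # fewer disjoint paths than requested => mark the request as unfeasible
            response['noPath'] = {'issue': 1}
        json_reply['response-list'].append(response)
    return json_reply

reply = build_json_reply({('admin', 'svc-1'): [{'devices': []}]}, num_disjoint=2)
assert reply['response-list'][0]['noPath'] == {'issue': 1}
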
diff --git a/src/pathcomp/frontend/tests/Objects_A_B_C.py b/src/pathcomp/frontend/tests/Objects_A_B_C.py
index f26d74ce4c665663735bae69dcfb5a4e14311bfa..5290123b62251a58d8e0a7f273ea23c38ee2cc8a 100644
--- a/src/pathcomp/frontend/tests/Objects_A_B_C.py
+++ b/src/pathcomp/frontend/tests/Objects_A_B_C.py
@@ -80,21 +80,36 @@ DEVICE_C3_ID, DEVICE_C3_ENDPOINTS, DEVICE_C3 = compose_device('C3', ['1', '2', '
 LINK_A2_C3_ID, LINK_A2_C3 = compose_link(DEVICE_A2_ENDPOINTS[2], DEVICE_C3_ENDPOINTS[2])
 LINK_C1_B2_ID, LINK_C1_B2 = compose_link(DEVICE_C1_ENDPOINTS[2], DEVICE_B2_ENDPOINTS[2])
 
+LINK_C3_A2_ID, LINK_C3_A2 = compose_link(DEVICE_C3_ENDPOINTS[2], DEVICE_A2_ENDPOINTS[2])
+LINK_B2_C1_ID, LINK_B2_C1 = compose_link(DEVICE_B2_ENDPOINTS[2], DEVICE_C1_ENDPOINTS[2])
+
 # ----- IntraDomain A Links --------------------------------------------------------------------------------------------
 LINK_A1_A2_ID, LINK_A1_A2 = compose_link(DEVICE_A1_ENDPOINTS[0], DEVICE_A2_ENDPOINTS[0])
 LINK_A1_A3_ID, LINK_A1_A3 = compose_link(DEVICE_A1_ENDPOINTS[1], DEVICE_A3_ENDPOINTS[0])
 LINK_A2_A3_ID, LINK_A2_A3 = compose_link(DEVICE_A2_ENDPOINTS[1], DEVICE_A3_ENDPOINTS[1])
 
+LINK_A2_A1_ID, LINK_A2_A1 = compose_link(DEVICE_A2_ENDPOINTS[0], DEVICE_A1_ENDPOINTS[0])
+LINK_A3_A1_ID, LINK_A3_A1 = compose_link(DEVICE_A3_ENDPOINTS[0], DEVICE_A1_ENDPOINTS[1])
+LINK_A3_A2_ID, LINK_A3_A2 = compose_link(DEVICE_A3_ENDPOINTS[1], DEVICE_A2_ENDPOINTS[1])
+
 # ----- IntraDomain B Links --------------------------------------------------------------------------------------------
 LINK_B1_B2_ID, LINK_B1_B2 = compose_link(DEVICE_B1_ENDPOINTS[0], DEVICE_B2_ENDPOINTS[0])
 LINK_B1_B3_ID, LINK_B1_B3 = compose_link(DEVICE_B1_ENDPOINTS[1], DEVICE_B3_ENDPOINTS[0])
 LINK_B2_B3_ID, LINK_B2_B3 = compose_link(DEVICE_B2_ENDPOINTS[1], DEVICE_B3_ENDPOINTS[1])
 
+LINK_B2_B1_ID, LINK_B2_B1 = compose_link(DEVICE_B2_ENDPOINTS[0], DEVICE_B1_ENDPOINTS[0])
+LINK_B3_B1_ID, LINK_B3_B1 = compose_link(DEVICE_B3_ENDPOINTS[0], DEVICE_B1_ENDPOINTS[1])
+LINK_B3_B2_ID, LINK_B3_B2 = compose_link(DEVICE_B3_ENDPOINTS[1], DEVICE_B2_ENDPOINTS[1])
+
 # ----- IntraDomain C Links --------------------------------------------------------------------------------------------
 LINK_C1_C2_ID, LINK_C1_C2 = compose_link(DEVICE_C1_ENDPOINTS[0], DEVICE_C2_ENDPOINTS[0])
 LINK_C1_C3_ID, LINK_C1_C3 = compose_link(DEVICE_C1_ENDPOINTS[1], DEVICE_C3_ENDPOINTS[0])
 LINK_C2_C3_ID, LINK_C2_C3 = compose_link(DEVICE_C2_ENDPOINTS[1], DEVICE_C3_ENDPOINTS[1])
 
+LINK_C2_C1_ID, LINK_C2_C1 = compose_link(DEVICE_C2_ENDPOINTS[0], DEVICE_C1_ENDPOINTS[0])
+LINK_C3_C1_ID, LINK_C3_C1 = compose_link(DEVICE_C3_ENDPOINTS[0], DEVICE_C1_ENDPOINTS[1])
+LINK_C3_C2_ID, LINK_C3_C2 = compose_link(DEVICE_C3_ENDPOINTS[1], DEVICE_C2_ENDPOINTS[1])
+
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_A1_B1 = compose_service(DEVICE_A1_ENDPOINTS[2], DEVICE_B1_ENDPOINTS[2], constraints=[
     json_constraint_sla_capacity(10.0),
@@ -108,31 +123,38 @@ DEVICES    = [  DEVICE_A1, DEVICE_A2, DEVICE_A3,
                 DEVICE_B1, DEVICE_B2, DEVICE_B3,
                 DEVICE_C1, DEVICE_C2, DEVICE_C3,    ]
 LINKS      = [  LINK_A2_C3, LINK_C1_B2,
+                LINK_C3_A2, LINK_B2_C1,
+
                 LINK_A1_A2, LINK_A1_A3, LINK_A2_A3,
+                LINK_A2_A1, LINK_A3_A1, LINK_A3_A2,
+
                 LINK_B1_B2, LINK_B1_B3, LINK_B2_B3,
-                LINK_C1_C2, LINK_C1_C3, LINK_C2_C3, ]
+                LINK_B2_B1, LINK_B3_B1, LINK_B3_B2,
+
+                LINK_C1_C2, LINK_C1_C3, LINK_C2_C3,
+                LINK_C2_C1, LINK_C3_C1, LINK_C3_C2, ]
 SERVICES   = [  SERVICE_A1_B1]
 
-OBJECTS_PER_TOPOLOGY = [
-    (TOPOLOGY_ADMIN_ID,
-        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,
-            DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,
-            DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
-        [   LINK_A2_C3_ID, LINK_C1_B2_ID,
-            LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,
-            LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,
-            LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
-    ),
-    (TOPOLOGY_A_ID,
-        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,       ],
-        [   LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,    ],
-    ),
-    (TOPOLOGY_B_ID,
-        [   DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,       ],
-        [   LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,    ],
-    ),
-    (TOPOLOGY_C_ID,
-        [   DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
-        [   LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
-    ),
-]
+#OBJECTS_PER_TOPOLOGY = [
+#    (TOPOLOGY_ADMIN_ID,
+#        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,
+#            DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,
+#            DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
+#        [   LINK_A2_C3_ID, LINK_C1_B2_ID,
+#            LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,
+#            LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,
+#            LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
+#    ),
+#    (TOPOLOGY_A_ID,
+#        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,       ],
+#        [   LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,    ],
+#    ),
+#    (TOPOLOGY_B_ID,
+#        [   DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,       ],
+#        [   LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,    ],
+#    ),
+#    (TOPOLOGY_C_ID,
+#        [   DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
+#        [   LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
+#    ),
+#]
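
The test topology above now spells out every link in both directions by hand, and the same pattern repeats in the DC/CSGW/TN variants below. A hypothetical helper (not part of the repository) that derives the reverse entry from a forward compose_link call could keep those lists shorter; it assumes compose_link(ep_a, ep_b) returns (link_id, link) as used throughout this file.

# Hypothetical convenience helper: compose both directions of a link in one call.
def compose_bidirectional_link(compose_link, endpoint_a, endpoint_b):
    forward  = compose_link(endpoint_a, endpoint_b)   # e.g. A2 -> C3
    backward = compose_link(endpoint_b, endpoint_a)   # e.g. C3 -> A2
    return forward, backward

# usage sketch:
# (LINK_A2_C3_ID, LINK_A2_C3), (LINK_C3_A2_ID, LINK_C3_A2) = \
#     compose_bidirectional_link(compose_link, DEVICE_A2_ENDPOINTS[2], DEVICE_C3_ENDPOINTS[2])
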
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
index 9ee784e1f76026416bca9824aa8e54e2c4f874f2..053dfd4c45e3822914745905c71f9b64300e1a2f 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
@@ -118,6 +118,11 @@ LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1
 LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
 LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
 
+LINK_CS1GW1_DC1GW_ID, LINK_CS1GW1_DC1GW = compose_link(DEV_CS1GW1_EPS[0], DEV_DC1GW_EPS[0])
+LINK_CS1GW2_DC1GW_ID, LINK_CS1GW2_DC1GW = compose_link(DEV_CS1GW2_EPS[0], DEV_DC1GW_EPS[1])
+LINK_CS2GW1_DC2GW_ID, LINK_CS2GW1_DC2GW = compose_link(DEV_CS2GW1_EPS[0], DEV_DC2GW_EPS[0])
+LINK_CS2GW2_DC2GW_ID, LINK_CS2GW2_DC2GW = compose_link(DEV_CS2GW2_EPS[0], DEV_DC2GW_EPS[1])
+
 # InterDomain CSGW-TN
 LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0])
 LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0])
@@ -128,6 +133,15 @@ LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4
 LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1])
 LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1])
 
+LINK_TNR1_CS1GW1_ID, LINK_TNR1_CS1GW1 = compose_link(DEV_TNR1_EPS[0], DEV_CS1GW1_EPS[1])
+LINK_TNR2_CS1GW2_ID, LINK_TNR2_CS1GW2 = compose_link(DEV_TNR2_EPS[0], DEV_CS1GW2_EPS[1])
+LINK_TNR2_CS1GW1_ID, LINK_TNR2_CS1GW1 = compose_link(DEV_TNR2_EPS[1], DEV_CS1GW1_EPS[2])
+LINK_TNR1_CS1GW2_ID, LINK_TNR1_CS1GW2 = compose_link(DEV_TNR1_EPS[1], DEV_CS1GW2_EPS[2])
+LINK_TNR3_CS2GW1_ID, LINK_TNR3_CS2GW1 = compose_link(DEV_TNR3_EPS[0], DEV_CS2GW1_EPS[1])
+LINK_TNR4_CS2GW2_ID, LINK_TNR4_CS2GW2 = compose_link(DEV_TNR4_EPS[0], DEV_CS2GW2_EPS[1])
+LINK_TNR4_CS2GW1_ID, LINK_TNR4_CS2GW1 = compose_link(DEV_TNR4_EPS[1], DEV_CS2GW1_EPS[2])
+LINK_TNR3_CS2GW2_ID, LINK_TNR3_CS2GW2 = compose_link(DEV_TNR3_EPS[1], DEV_CS2GW2_EPS[2])
+
 # IntraDomain TN
 LINK_TNR1_TNR2_ID, LINK_TNR1_TNR2 = compose_link(DEV_TNR1_EPS[2], DEV_TNR2_EPS[3])
 LINK_TNR2_TNR3_ID, LINK_TNR2_TNR3 = compose_link(DEV_TNR2_EPS[2], DEV_TNR3_EPS[3])
@@ -136,6 +150,13 @@ LINK_TNR4_TNR1_ID, LINK_TNR4_TNR1 = compose_link(DEV_TNR4_EPS[2], DEV_TNR1_EPS[3
 LINK_TNR1_TNR3_ID, LINK_TNR1_TNR3 = compose_link(DEV_TNR1_EPS[4], DEV_TNR3_EPS[4])
 LINK_TNR2_TNR4_ID, LINK_TNR2_TNR4 = compose_link(DEV_TNR2_EPS[4], DEV_TNR4_EPS[4])
 
+LINK_TNR2_TNR1_ID, LINK_TNR2_TNR1 = compose_link(DEV_TNR2_EPS[3], DEV_TNR1_EPS[2])
+LINK_TNR3_TNR2_ID, LINK_TNR3_TNR2 = compose_link(DEV_TNR3_EPS[3], DEV_TNR2_EPS[2])
+LINK_TNR4_TNR3_ID, LINK_TNR4_TNR3 = compose_link(DEV_TNR4_EPS[3], DEV_TNR3_EPS[2])
+LINK_TNR1_TNR4_ID, LINK_TNR1_TNR4 = compose_link(DEV_TNR1_EPS[3], DEV_TNR4_EPS[2])
+LINK_TNR3_TNR1_ID, LINK_TNR3_TNR1 = compose_link(DEV_TNR3_EPS[4], DEV_TNR1_EPS[4])
+LINK_TNR4_TNR2_ID, LINK_TNR4_TNR2 = compose_link(DEV_TNR4_EPS[4], DEV_TNR2_EPS[4])
+
 
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
@@ -151,41 +172,44 @@ DEVICES    = [  DEV_DC1GW, DEV_DC2GW,
                 DEV_TNR1, DEV_TNR2, DEV_TNR3, DEV_TNR4,
             ]
 LINKS      = [  LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
+                LINK_CS1GW1_DC1GW, LINK_CS1GW2_DC1GW, LINK_CS2GW1_DC2GW, LINK_CS2GW2_DC2GW,
+
                 LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1,
                 LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3,
                 LINK_TNR1_TNR2, LINK_TNR2_TNR3, LINK_TNR3_TNR4, LINK_TNR4_TNR1, LINK_TNR1_TNR3, LINK_TNR2_TNR4,
+                LINK_TNR2_TNR1, LINK_TNR3_TNR2, LINK_TNR4_TNR3, LINK_TNR1_TNR4, LINK_TNR3_TNR1, LINK_TNR4_TNR2,
             ]
 SERVICES   = [  SERVICE_DC1GW_DC2GW   ]
 
-OBJECTS_PER_TOPOLOGY = [
-    (TOPO_ADMIN_ID,
-        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
-            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
-            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-        ],
-        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
-            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
-            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
-            LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
-            LINK_TNR2_TNR4_ID,
-        ],
-    ),
-    (TOPO_DC1_ID,
-        [DEV_DC1GW_ID],
-        []),
-    (TOPO_DC2_ID,
-        [DEV_DC2GW_ID],
-        []),
-    (TOPO_CS1_ID,
-        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
-        []),
-    (TOPO_CS2_ID,
-        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
-        []),
-    (TOPO_TN_ID,
-        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-        ],
-        [   LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
-            LINK_TNR2_TNR4_ID,
-        ]),
-]
+#OBJECTS_PER_TOPOLOGY = [
+#    (TOPO_ADMIN_ID,
+#        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
+#            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
+#            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#        ],
+#        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
+#            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
+#            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
+#            LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
+#            LINK_TNR2_TNR4_ID,
+#        ],
+#    ),
+#    (TOPO_DC1_ID,
+#        [DEV_DC1GW_ID],
+#        []),
+#    (TOPO_DC2_ID,
+#        [DEV_DC2GW_ID],
+#        []),
+#    (TOPO_CS1_ID,
+#        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
+#        []),
+#    (TOPO_CS2_ID,
+#        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
+#        []),
+#    (TOPO_TN_ID,
+#        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#        ],
+#        [   LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
+#            LINK_TNR2_TNR4_ID,
+#        ]),
+#]
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
index 71510d088746bd791e4671686dd5114874dd5a2a..2c8428568c001a53cbf2c08aa13b61ad14a1bd51 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
@@ -130,6 +130,11 @@ LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1
 LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
 LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
 
+LINK_CS1GW1_DC1GW_ID, LINK_CS1GW1_DC1GW = compose_link(DEV_CS1GW1_EPS[0], DEV_DC1GW_EPS[0])
+LINK_CS1GW2_DC1GW_ID, LINK_CS1GW2_DC1GW = compose_link(DEV_CS1GW2_EPS[0], DEV_DC1GW_EPS[1])
+LINK_CS2GW1_DC2GW_ID, LINK_CS2GW1_DC2GW = compose_link(DEV_CS2GW1_EPS[0], DEV_DC2GW_EPS[0])
+LINK_CS2GW2_DC2GW_ID, LINK_CS2GW2_DC2GW = compose_link(DEV_CS2GW2_EPS[0], DEV_DC2GW_EPS[1])
+
 # InterDomain CSGW-TN
 LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0])
 LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0])
@@ -140,12 +145,26 @@ LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4
 LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1])
 LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1])
 
+LINK_TNR1_CS1GW1_ID, LINK_TNR1_CS1GW1 = compose_link(DEV_TNR1_EPS[0], DEV_CS1GW1_EPS[1])
+LINK_TNR2_CS1GW2_ID, LINK_TNR2_CS1GW2 = compose_link(DEV_TNR2_EPS[0], DEV_CS1GW2_EPS[1])
+LINK_TNR2_CS1GW1_ID, LINK_TNR2_CS1GW1 = compose_link(DEV_TNR2_EPS[1], DEV_CS1GW1_EPS[2])
+LINK_TNR1_CS1GW2_ID, LINK_TNR1_CS1GW2 = compose_link(DEV_TNR1_EPS[1], DEV_CS1GW2_EPS[2])
+LINK_TNR3_CS2GW1_ID, LINK_TNR3_CS2GW1 = compose_link(DEV_TNR3_EPS[0], DEV_CS2GW1_EPS[1])
+LINK_TNR4_CS2GW2_ID, LINK_TNR4_CS2GW2 = compose_link(DEV_TNR4_EPS[0], DEV_CS2GW2_EPS[1])
+LINK_TNR4_CS2GW1_ID, LINK_TNR4_CS2GW1 = compose_link(DEV_TNR4_EPS[1], DEV_CS2GW1_EPS[2])
+LINK_TNR3_CS2GW2_ID, LINK_TNR3_CS2GW2 = compose_link(DEV_TNR3_EPS[1], DEV_CS2GW2_EPS[2])
+
 # IntraDomain TN
 LINK_TNR1_TOLS_ID, LINK_TNR1_TOLS = compose_link(DEV_TNR1_EPS[2], DEV_TOLS_EPS[0])
 LINK_TNR2_TOLS_ID, LINK_TNR2_TOLS = compose_link(DEV_TNR2_EPS[2], DEV_TOLS_EPS[1])
 LINK_TNR3_TOLS_ID, LINK_TNR3_TOLS = compose_link(DEV_TNR3_EPS[2], DEV_TOLS_EPS[2])
 LINK_TNR4_TOLS_ID, LINK_TNR4_TOLS = compose_link(DEV_TNR4_EPS[2], DEV_TOLS_EPS[3])
 
+LINK_TOLS_TNR1_ID, LINK_TOLS_TNR1 = compose_link(DEV_TOLS_EPS[0], DEV_TNR1_EPS[2])
+LINK_TOLS_TNR2_ID, LINK_TOLS_TNR2 = compose_link(DEV_TOLS_EPS[1], DEV_TNR2_EPS[2])
+LINK_TOLS_TNR3_ID, LINK_TOLS_TNR3 = compose_link(DEV_TOLS_EPS[2], DEV_TNR3_EPS[2])
+LINK_TOLS_TNR4_ID, LINK_TOLS_TNR4 = compose_link(DEV_TOLS_EPS[3], DEV_TNR4_EPS[2])
+
 
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
@@ -162,41 +181,47 @@ DEVICES    = [  DEV_DC1GW, DEV_DC2GW,
                 DEV_TOLS,
             ]
 LINKS      = [  LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
+                LINK_CS1GW1_DC1GW, LINK_CS1GW2_DC1GW, LINK_CS2GW1_DC2GW, LINK_CS2GW2_DC2GW,
+
                 LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1,
                 LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3,
+                LINK_TNR1_CS1GW1, LINK_TNR2_CS1GW2, LINK_TNR2_CS1GW1, LINK_TNR1_CS1GW2,
+                LINK_TNR3_CS2GW1, LINK_TNR4_CS2GW2, LINK_TNR4_CS2GW1, LINK_TNR3_CS2GW2,
+
                 LINK_TNR1_TOLS, LINK_TNR2_TOLS, LINK_TNR3_TOLS, LINK_TNR4_TOLS,
+                LINK_TOLS_TNR1, LINK_TOLS_TNR2, LINK_TOLS_TNR3, LINK_TOLS_TNR4,
             ]
 SERVICES   = [  SERVICE_DC1GW_DC2GW   ]
 
-OBJECTS_PER_TOPOLOGY = [
-    (TOPO_ADMIN_ID,
-        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
-            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
-            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-            DEV_TOLS_ID,
-        ],
-        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
-            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
-            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
-            LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
-        ],
-    ),
-    (TOPO_DC1_ID,
-        [DEV_DC1GW_ID],
-        []),
-    (TOPO_DC2_ID,
-        [DEV_DC2GW_ID],
-        []),
-    (TOPO_CS1_ID,
-        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
-        []),
-    (TOPO_CS2_ID,
-        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
-        []),
-    (TOPO_TN_ID,
-        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-            DEV_TOLS_ID,
-        ],
-        [   LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
-        ]),
-]
+#OBJECTS_PER_TOPOLOGY = [
+#    (TOPO_ADMIN_ID,
+#        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
+#            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
+#            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#            DEV_TOLS_ID,
+#        ],
+#        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
+#            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
+#            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
+#            LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
+#        ],
+#    ),
+#    (TOPO_DC1_ID,
+#        [DEV_DC1GW_ID],
+#        []),
+#    (TOPO_DC2_ID,
+#        [DEV_DC2GW_ID],
+#        []),
+#    (TOPO_CS1_ID,
+#        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
+#        []),
+#    (TOPO_CS2_ID,
+#        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
+#        []),
+#    (TOPO_TN_ID,
+#        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#            DEV_TOLS_ID,
+#        ],
+#        [   LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
+#        ]),
+#]
diff --git a/src/pathcomp/frontend/tests/test_unitary.py b/src/pathcomp/frontend/tests/test_unitary.py
index 8088259b80b8ade2669568b74f004dcfa631dd9c..f4e3cbf0f60285b960625a677854c4b7ab4decb9 100644
--- a/src/pathcomp/frontend/tests/test_unitary.py
+++ b/src/pathcomp/frontend/tests/test_unitary.py
@@ -13,12 +13,15 @@
 # limitations under the License.
 
 import copy, logging, os
-from common.proto.context_pb2 import Context, ContextId, DeviceId, Link, LinkId, Topology, Device, TopologyId
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId
 from common.proto.pathcomp_pb2 import PathCompRequest
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
 from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.Constraint import (
     json_constraint_custom, json_constraint_endpoint_location_region, json_constraint_endpoint_priority,
     json_constraint_sla_availability, json_constraint_sla_capacity, json_constraint_sla_latency)
+from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.EndPoint import json_endpoint_id
 from common.tools.object_factory.Service import json_service_l3nm_planned
@@ -26,9 +29,9 @@ from context.client.ContextClient import ContextClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient
 
 # Scenarios:
-#from .Objects_A_B_C import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
-#from .Objects_DC_CSGW_TN import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
-from .Objects_DC_CSGW_TN_OLS import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
+#from .Objects_A_B_C import CONTEXTS, DEVICES, LINKS, SERVICES, TOPOLOGIES
+#from .Objects_DC_CSGW_TN import CONTEXTS, DEVICES, LINKS, SERVICES, TOPOLOGIES
+from .Objects_DC_CSGW_TN_OLS import CONTEXTS, DEVICES, LINKS, SERVICES, TOPOLOGIES
 
 # configure backend environment variables before overwriting them with fixtures to use real backend pathcomp
 DEFAULT_PATHCOMP_BACKEND_SCHEME  = 'http'
@@ -58,31 +61,29 @@ from .PrepareTestScenario import ( # pylint: disable=unused-import
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-def test_prepare_environment(
-    context_client : ContextClient):    # pylint: disable=redefined-outer-name
-
-    for context  in CONTEXTS  : context_client.SetContext (Context (**context ))
-    for topology in TOPOLOGIES: context_client.SetTopology(Topology(**topology))
-    for device   in DEVICES   : context_client.SetDevice  (Device  (**device  ))
-    for link     in LINKS     : context_client.SetLink    (Link    (**link    ))
-
-    for topology_id, device_ids, link_ids in OBJECTS_PER_TOPOLOGY:
-        topology = Topology()
-        topology.CopyFrom(context_client.GetTopology(TopologyId(**topology_id)))
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+DESCRIPTORS = {
+    'dummy_mode': True,
+    'contexts'  : CONTEXTS,
+    'topologies': TOPOLOGIES,
+    'devices'   : DEVICES,
+    'links'     : LINKS,
+}
 
-        device_ids_in_topology = {device_id.device_uuid.uuid for device_id in topology.device_ids}
-        func_device_id_not_added = lambda device_id: device_id['device_uuid']['uuid'] not in device_ids_in_topology
-        func_device_id_json_to_grpc = lambda device_id: DeviceId(**device_id)
-        device_ids_to_add = list(map(func_device_id_json_to_grpc, filter(func_device_id_not_added, device_ids)))
-        topology.device_ids.extend(device_ids_to_add)
+def test_prepare_environment(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+) -> None:
+    validate_empty_scenario(context_client)
 
-        link_ids_in_topology = {link_id.link_uuid.uuid for link_id in topology.link_ids}
-        func_link_id_not_added = lambda link_id: link_id['link_uuid']['uuid'] not in link_ids_in_topology
-        func_link_id_json_to_grpc = lambda link_id: LinkId(**link_id)
-        link_ids_to_add = list(map(func_link_id_json_to_grpc, filter(func_link_id_not_added, link_ids)))
-        topology.link_ids.extend(link_ids_to_add)
+    descriptor_loader = DescriptorLoader(descriptors=DESCRIPTORS, context_client=context_client)
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+    descriptor_loader.validate()
 
-        context_client.SetTopology(topology)
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
 
 def test_request_service_shortestpath(
     pathcomp_client : PathCompClient):  # pylint: disable=redefined-outer-name
@@ -266,9 +267,15 @@ def test_request_service_kdisjointpath(
 
 
 def test_cleanup_environment(
-    context_client : ContextClient):    # pylint: disable=redefined-outer-name
-
-    for link     in LINKS     : context_client.RemoveLink    (LinkId    (**link    ['link_id'    ]))
-    for device   in DEVICES   : context_client.RemoveDevice  (DeviceId  (**device  ['device_id'  ]))
-    for topology in TOPOLOGIES: context_client.RemoveTopology(TopologyId(**topology['topology_id']))
-    for context  in CONTEXTS  : context_client.RemoveContext (ContextId (**context ['context_id' ]))
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+) -> None:
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    # Validate the base scenario is still in place, then unload it
+    descriptor_loader = DescriptorLoader(descriptors=DESCRIPTORS, context_client=context_client)
+    descriptor_loader.validate()
+    descriptor_loader.unload()
+    validate_empty_scenario(context_client)
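
Scenario setup and teardown are now delegated to DescriptorLoader instead of issuing per-object Set/Remove RPCs. A compact sketch of that pattern, reusing only the calls visible in the hunk; the two wrapper functions are illustrative, not part of the test.

# Illustrative wrappers around the DescriptorLoader pattern used above.
from common.tools.descriptor.Loader import (
    DescriptorLoader, check_descriptor_load_results, validate_empty_scenario)

def load_scenario(context_client, descriptors : dict) -> DescriptorLoader:
    validate_empty_scenario(context_client)                  # start from a clean context
    loader = DescriptorLoader(descriptors=descriptors, context_client=context_client)
    check_descriptor_load_results(loader.process(), loader)  # fail fast on load errors
    loader.validate()                                        # confirm objects reached Context
    return loader

def unload_scenario(context_client, descriptors : dict) -> None:
    loader = DescriptorLoader(descriptors=descriptors, context_client=context_client)
    loader.validate()   # base scenario still present
    loader.unload()     # remove it
    validate_empty_scenario(context_client)
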
diff --git a/src/policy/pom.xml b/src/policy/pom.xml
index 6ea28421abedf6916e998b6cfdebe23c34908c4a..267006311f82c11bce4db29f2d114f30c1832f88 100644
--- a/src/policy/pom.xml
+++ b/src/policy/pom.xml
@@ -179,6 +179,11 @@
             <scope>test</scope>
         </dependency>
 
+        <dependency>
+            <groupId>io.quarkus</groupId>
+            <artifactId>quarkus-smallrye-metrics</artifactId>
+        </dependency>
+
     </dependencies>
 
     <build>
diff --git a/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java b/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java
index c10e5dc8b91ee9dcc2ae8aa74526faeb4e4bfcec..30e888d9fab1aae535dca345c7c56e28218bd2c2 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java
@@ -20,6 +20,9 @@ import context.ContextOuterClass.ServiceId;
 import io.quarkus.grpc.GrpcService;
 import io.smallrye.mutiny.Uni;
 import javax.inject.Inject;
+import org.eclipse.microprofile.metrics.MetricUnits;
+import org.eclipse.microprofile.metrics.annotation.Counted;
+import org.eclipse.microprofile.metrics.annotation.Timed;
 import policy.Policy;
 import policy.Policy.PolicyRuleBasic;
 import policy.Policy.PolicyRuleDevice;
@@ -41,6 +44,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyAddService_counter")
+    @Timed(name = "policy_policyAddService_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyAddService(PolicyRuleService request) {
         final var policyRuleService = serializer.deserialize(request);
 
@@ -51,6 +56,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyUpdateService_counter")
+    @Timed(name = "policy_policyUpdateService_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyUpdateService(PolicyRuleService request) {
         final var policyRuleService = serializer.deserialize(request);
 
@@ -61,6 +68,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyAddDevice_counter")
+    @Timed(name = "policy_policyAddDevice_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyAddDevice(PolicyRuleDevice request) {
         final var policyRuleDevice = serializer.deserialize(request);
 
@@ -71,6 +80,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyUpdateDevice_counter")
+    @Timed(name = "policy_policyUpdateDevice_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyUpdateDevice(PolicyRuleDevice request) {
         final var policyRuleDevice = serializer.deserialize(request);
 
@@ -81,6 +92,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyDelete_counter")
+    @Timed(name = "policy_policyDelete_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyDelete(PolicyRuleId request) {
         final var policyRuleId = serializer.deserialize(request);
 
@@ -88,6 +101,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_getPolicyService_counter")
+    @Timed(name = "policy_getPolicyService_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleService> getPolicyService(PolicyRuleId request) {
         final var policyRuleBasic = PolicyRuleBasic.newBuilder().setPolicyRuleId(request).build();
 
@@ -96,6 +111,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_getPolicyDevice_counter")
+    @Timed(name = "policy_getPolicyDevice_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleDevice> getPolicyDevice(PolicyRuleId request) {
         final var policyRuleBasic = PolicyRuleBasic.newBuilder().setPolicyRuleId(request).build();
 
@@ -104,6 +121,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_getPolicyByServiceId_counter")
+    @Timed(name = "policy_getPolicyByServiceId_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleServiceList> getPolicyByServiceId(ServiceId request) {
         return Uni.createFrom().item(() -> Policy.PolicyRuleServiceList.newBuilder().build());
     }
diff --git a/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java b/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
index daee299ddf64327c0d782e640cd1e924e139dccb..ad763e35dfeef71c2f9f73dbf51785a3e03c0e0d 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
@@ -23,5 +23,6 @@ public enum DeviceDriverEnum {
     P4,
     IETF_NETWORK_TOPOLOGY,
     ONF_TR_352,
-    XR
+    XR,
+    IETF_L2VPN
 }
diff --git a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
index 64102646119585e1f837b12a9be022d95a29c54f..b0fb90864ce32bf6b793dded5d1f9de1dfba5097 100644
--- a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
+++ b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
@@ -3601,7 +3601,8 @@ class SerializerTest {
                         ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352),
                 Arguments.of(DeviceDriverEnum.XR, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR),
                 Arguments.of(
-                        DeviceDriverEnum.IETF_L2VPN, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN),
+                        DeviceDriverEnum.IETF_L2VPN,
+                        ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN),
                 Arguments.of(
                         DeviceDriverEnum.UNDEFINED, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_UNDEFINED));
     }
diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml
index 40516e5cc3fdd1fb993a1248ad36ea7551edfc40..72da09ecaf1de9d080d686c63c0f18c88f09e8b4 100644
--- a/src/policy/target/kubernetes/kubernetes.yml
+++ b/src/policy/target/kubernetes/kubernetes.yml
@@ -17,16 +17,16 @@ apiVersion: v1
 kind: Service
 metadata:
   annotations:
-    app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5
-    app.quarkus.io/build-timestamp: 2022-11-18 - 12:56:37 +0000
+    app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+    app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
   labels:
     app.kubernetes.io/name: policyservice
     app: policyservice
   name: policyservice
 spec:
   ports:
-    - name: http
-      port: 8080
+    - name: metrics
+      port: 9192
       targetPort: 8080
     - name: grpc
       port: 6060
@@ -39,8 +39,8 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   annotations:
-    app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5
-    app.quarkus.io/build-timestamp: 2022-11-22 - 14:10:01 +0000
+    app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+    app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
   labels:
     app: policyservice
     app.kubernetes.io/name: policyservice
@@ -53,8 +53,8 @@ spec:
   template:
     metadata:
       annotations:
-        app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5
-        app.quarkus.io/build-timestamp: 2022-11-22 - 14:10:01 +0000
+        app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+        app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
       labels:
         app: policyservice
         app.kubernetes.io/name: policyservice
@@ -65,12 +65,12 @@ spec:
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.namespace
-            - name: MONITORING_SERVICE_HOST
-              value: monitoringservice
-            - name: CONTEXT_SERVICE_HOST
-              value: contextservice
             - name: SERVICE_SERVICE_HOST
               value: serviceservice
+            - name: CONTEXT_SERVICE_HOST
+              value: contextservice
+            - name: MONITORING_SERVICE_HOST
+              value: monitoringservice
           image: labs.etsi.org:5050/tfs/controller/policy:0.1.0
           imagePullPolicy: Always
           livenessProbe:
@@ -86,10 +86,10 @@ spec:
           name: policyservice
           ports:
             - containerPort: 8080
-              name: http
+              name: metrics
               protocol: TCP
             - containerPort: 6060
-              name: grpc
+              name: grpc-server
               protocol: TCP
           readinessProbe:
             failureThreshold: 3
@@ -101,3 +101,29 @@ spec:
             periodSeconds: 10
             successThreshold: 1
             timeoutSeconds: 10
+          resources:
+            requests:
+              cpu: 50m
+              memory: 512Mi
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: policyservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: policyservice
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
\ No newline at end of file
diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
index 363983b8653e1cfa553279d2df74d6ac893a4fec..ac44574ad60242b0acf21ba824ea448d5ec30bf1 100644
--- a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
+++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
@@ -105,17 +105,17 @@ def teardown_config_rules(
 
     if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
     network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
-    #connection_point_id   = 'VC-1'
+    connection_point_id   = 'VC-1'
 
     json_config_rules = [
-        #json_config_rule_delete(
-        #    '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
-        #    {'name': network_instance_name, 'connection_point': connection_point_id}),
+        json_config_rule_delete(
+            '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
+            {'name': network_instance_name, 'connection_point': connection_point_id}),
 
-        #json_config_rule_delete(
-        #    '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
-        #    {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
-        #    'subinterface': sub_interface_index}),
+        json_config_rule_delete(
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
+            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
+            'subinterface': sub_interface_index}),
 
         json_config_rule_delete(
             '/network_instance[{:s}]'.format(network_instance_name),
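
Re-enabling the two json_config_rule_delete entries means teardown now removes the connection point and the interface binding explicitly before the network instance itself is deleted. A standalone sketch of how those two rules are composed, assuming json_config_rule_delete(resource_key, resource_value) as imported at the top of this file; the argument values mirror the hunk.

# Standalone sketch of the two teardown rules re-enabled above; the network
# instance deletion itself stays last, as in the file.
from common.tools.object_factory.ConfigRule import json_config_rule_delete

def compose_teardown_rules(endpoint_name : str, circuit_id : str, sub_interface_index : int) -> list:
    if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
    network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
    connection_point_id   = 'VC-1'
    return [
        json_config_rule_delete(
            '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
            {'name': network_instance_name, 'connection_point': connection_point_id}),
        json_config_rule_delete(
            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
             'subinterface': sub_interface_index}),
    ]
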
diff --git a/src/service/service/service_handlers/p4/p4_service_handler.py b/src/service/service/service_handlers/p4/p4_service_handler.py
index 6f2cfb5a9bc4dac991eecd14ba7b6eb1218bdaa2..8d609c11c9c1c4f25c0d387290c11de36af69a9a 100644
--- a/src/service/service/service_handlers/p4/p4_service_handler.py
+++ b/src/service/service/service_handlers/p4/p4_service_handler.py
@@ -16,18 +16,35 @@
 P4 service handler for the TeraFlowSDN controller.
 """
 
-import anytree, json, logging
-from typing import Any, Dict, List, Optional, Tuple, Union
-from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service
-from common.tools.object_factory.ConfigRule import json_config_rule, json_config_rule_delete, json_config_rule_set
+import logging
+from typing import Any, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
+from common.proto.context_pb2 import ConfigRule, DeviceId, Service
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
 from common.tools.object_factory.Device import json_device_id
-from common.type_checkers.Checkers import chk_type, chk_length
+from common.type_checkers.Checkers import chk_type
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
-from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
 
 LOGGER = logging.getLogger(__name__)
 
+HISTOGRAM_BUCKETS = (
+    # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
+    0.0010, 0.0025, 0.0050, 0.0075,
+    0.0100, 0.0250, 0.0500, 0.0750,
+    0.1000, 0.2500, 0.5000, 0.7500,
+    1.0000, 2.5000, 5.0000, 7.5000,
+    10.0000, 25.000, 50.0000, 75.000,
+    100.0, INF
+)
+METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4'})
+METRICS_POOL.get_or_create('SetEndpoint',      MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+METRICS_POOL.get_or_create('DeleteEndpoint',   MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+METRICS_POOL.get_or_create('SetConstraint',    MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+METRICS_POOL.get_or_create('DeleteConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+METRICS_POOL.get_or_create('SetConfig',        MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+METRICS_POOL.get_or_create('DeleteConfig',     MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+
 def create_rule_set(endpoint_a, endpoint_b):
     return json_config_rule_set(
         'table',
@@ -99,6 +116,7 @@ class P4ServiceHandler(_ServiceHandler):
         self.__service = service
         self.__task_executor = task_executor # pylint: disable=unused-private-member
 
+    @metered_subclass_method(METRICS_POOL)
     def SetEndpoint(
         self, endpoints : List[Tuple[str, str, Optional[str]]],
         connection_uuid : Optional[str] = None
@@ -169,6 +187,7 @@ class P4ServiceHandler(_ServiceHandler):
 
         return results
 
+    @metered_subclass_method(METRICS_POOL)
     def DeleteEndpoint(
         self, endpoints : List[Tuple[str, str, Optional[str]]],
         connection_uuid : Optional[str] = None
@@ -239,6 +258,7 @@ class P4ServiceHandler(_ServiceHandler):
 
         return results
 
+    @metered_subclass_method(METRICS_POOL)
     def SetConstraint(self, constraints: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Create/Update service constraints.
@@ -261,6 +281,7 @@ class P4ServiceHandler(_ServiceHandler):
         LOGGER.warning(msg.format(str(constraints)))
         return [True for _ in range(len(constraints))]
 
+    @metered_subclass_method(METRICS_POOL)
     def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Delete service constraints.
@@ -285,6 +306,7 @@ class P4ServiceHandler(_ServiceHandler):
         LOGGER.warning(msg.format(str(constraints)))
         return [True for _ in range(len(constraints))]
 
+    @metered_subclass_method(METRICS_POOL)
     def SetConfig(self, resources: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Create/Update configuration for a list of service resources.
@@ -308,6 +330,7 @@ class P4ServiceHandler(_ServiceHandler):
         LOGGER.warning(msg.format(str(resources)))
         return [True for _ in range(len(resources))]
 
+    @metered_subclass_method(METRICS_POOL)
     def DeleteConfig(self, resources: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Delete configuration for a list of service resources.
diff --git a/src/tests/ofc22/descriptors_emulated.json b/src/tests/ofc22/descriptors_emulated.json
index aa76edecd116ee7336fc1a2621d2bc3ae95080ce..b68b9636d58d9c80c4774e4ade557f83796ac5b5 100644
--- a/src/tests/ofc22/descriptors_emulated.json
+++ b/src/tests/ofc22/descriptors_emulated.json
@@ -97,6 +97,35 @@
                 {"device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}},
                 {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
             ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R1-EMU/13/0/0/aade6001-f00b-5e2f-a357-6a0a9d3de870"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}},
+                {"device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R2-EMU/13/0/0/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}},
+                {"device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R3-EMU/13/0/0/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}},
+                {"device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R4-EMU/13/0/0/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}},
+                {"device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
         }
     ]
 }
\ No newline at end of file
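
The OFC'22 descriptor now lists each OLS<->router link in both directions. A small, illustrative generator (not part of the repository) shows how the reverse entries could be derived from the forward ones instead of being duplicated by hand; it assumes the link_id/link_endpoint_ids layout used above, and the generated link_uuid naming is simplified rather than a reproduction of the exact names in this file.

# Illustrative reverse-link generator for descriptors shaped like the ones above.
import copy, json

def reverse_link(link : dict) -> dict:
    reverse = copy.deepcopy(link)
    reverse['link_endpoint_ids'] = list(reversed(link['link_endpoint_ids']))
    parts = [
        '{:s}/{:s}'.format(ep['device_id']['device_uuid']['uuid'], ep['endpoint_uuid']['uuid'])
        for ep in reverse['link_endpoint_ids']
    ]
    reverse['link_id'] = {'link_uuid': {'uuid': '=='.join(parts)}}  # simplified naming
    return reverse

forward = {
    'link_id': {'link_uuid': {'uuid': 'sample-forward-link'}},
    'link_endpoint_ids': [
        {'device_id': {'device_uuid': {'uuid': 'R1-EMU'}}, 'endpoint_uuid': {'uuid': '13/0/0'}},
        {'device_id': {'device_uuid': {'uuid': 'O1-OLS'}}, 'endpoint_uuid': {'uuid': 'aade6001-f00b-5e2f-a357-6a0a9d3de870'}},
    ],
}
print(json.dumps(reverse_link(forward), indent=2))
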
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index 32cefddf3b2a8251623b60fd9fc039588cd6b9bb..75f036befd4bed3bb3bd743b9f423bf21c014e55 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -131,25 +131,18 @@ def topology():
         topology_uuid = session['topology_uuid']
 
         json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))
-        grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id))
+        response = context_client.GetTopologyDetails(TopologyId(**json_topo_id))
 
-        topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids}
-        topo_link_uuids   = {link_id  .link_uuid  .uuid for link_id   in grpc_topology.link_ids  }
-
-        response = context_client.ListDevices(Empty())
         devices = []
         for device in response.devices:
-            if device.device_id.device_uuid.uuid not in topo_device_uuids: continue
             devices.append({
                 'id': device.device_id.device_uuid.uuid,
                 'name': device.name,
                 'type': device.device_type,
             })
 
-        response = context_client.ListLinks(Empty())
         links = []
         for link in response.links:
-            if link.link_id.link_uuid.uuid not in topo_link_uuids: continue
             if len(link.link_endpoint_ids) != 2:
                 str_link = grpc_message_to_json_string(link)
                 LOGGER.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link))