diff --git a/deploy/all.sh b/deploy/all.sh
index 6f8331b769b6f84a13ac66b48ca2f861a8308ce5..9584dd32d121b7f63e7c7f177bf7bee8c287b4c9 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -147,6 +147,15 @@ export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
 
 
+# ----- K8s Observability ------------------------------------------------------
+
+# If not already set, set the external port on which the Prometheus Mgmt HTTP GUI interface will be exposed.
+export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
+
+# If not already set, set the external port on which the Grafana HTTP Dashboards will be exposed.
+export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
+
+
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
@@ -160,6 +169,9 @@ export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
 # Deploy QuestDB
 ./deploy/qdb.sh
 
+# Expose Monitoring Dashboards (Prometheus and Grafana)
+./deploy/expose_dashboard.sh
+
 # Deploy TeraFlowSDN
 ./deploy/tfs.sh
 
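The two new variables follow the `${VAR:-default}` pattern used throughout the deploy scripts, so they can be overridden from the caller's environment. A minimal usage sketch (the port values are arbitrary examples):

```bash
# Override the observability ports before deploying; unset variables fall back
# to the defaults above (9090 for Prometheus, 3000 for Grafana).
export PROM_EXT_PORT_HTTP="9091"
export GRAF_EXT_PORT_HTTP="3001"
./deploy/all.sh
```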
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index 216339117d2156d0ae1beddb5a1d6a7ccbe33219..414de523d10f7d1edb99799e1f5889b340d8ad04 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -167,6 +167,11 @@ function crdb_drop_database_single() {
 }
 
 function crdb_deploy_cluster() {
+    echo "CockroachDB Operator Namespace"
+    echo ">>> Create CockroachDB Operator Namespace (if missing)"
+    kubectl apply -f "${CRDB_MANIFESTS_PATH}/pre_operator.yaml"
+    echo
+
     echo "Cockroach Operator CRDs"
     echo ">>> Apply Cockroach Operator CRDs (if they are missing)"
     cp "${CRDB_MANIFESTS_PATH}/crds.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_crds.yaml"
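Applying `pre_operator.yaml` before the CRDs guarantees the operator namespace exists, and `kubectl apply` keeps the step idempotent on redeploys. A quick check that the namespace landed (a sketch, assuming the names defined in `manifests/cockroachdb/pre_operator.yaml`):

```bash
# Confirm the namespace from pre_operator.yaml exists and carries the
# expected control-plane label before the operator CRDs are applied.
kubectl get namespace cockroach-operator-system --show-labels
kubectl get namespaces -l control-plane=cockroach-operator
```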
diff --git a/deploy/expose_dashboard.sh b/deploy/expose_dashboard.sh
new file mode 100755
index 0000000000000000000000000000000000000000..60b41c7b75d4f96a22151b1d4d68ba53c75a265c
--- /dev/null
+++ b/deploy/expose_dashboard.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the external port on which the Prometheus Mgmt HTTP GUI interface will be exposed.
+export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
+
+# If not already set, set the external port on which the Grafana HTTP Dashboards will be exposed.
+export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
+
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+function expose_dashboard() {
+    echo "Prometheus Port Mapping"
+    echo ">>> Expose Prometheus HTTP Mgmt GUI port (9090->${PROM_EXT_PORT_HTTP})"
+    PROM_PORT_HTTP=$(kubectl --namespace monitoring get service prometheus-k8s -o 'jsonpath={.spec.ports[?(@.name=="web")].port}')
+    PATCH='{"data": {"'${PROM_EXT_PORT_HTTP}'": "monitoring/prometheus-k8s:'${PROM_PORT_HTTP}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${PROM_EXT_PORT_HTTP}', "hostPort": '${PROM_EXT_PORT_HTTP}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+
+    echo "Grafana Port Mapping"
+    echo ">>> Expose Grafana HTTP Mgmt GUI port (3000->${GRAF_EXT_PORT_HTTP})"
+    GRAF_PORT_HTTP=$(kubectl --namespace monitoring get service grafana -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+    PATCH='{"data": {"'${GRAF_EXT_PORT_HTTP}'": "monitoring/grafana:'${GRAF_PORT_HTTP}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${GRAF_EXT_PORT_HTTP}', "hostPort": '${GRAF_EXT_PORT_HTTP}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+}
+
+expose_dashboard
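The function relies on the MicroK8s NGINX ingress: TCP ports are declared in the `nginx-ingress-tcp-microk8s-conf` ConfigMap and published as `hostPort`s on the ingress DaemonSet. One way to verify the exposure afterwards (a sketch, assuming the default MicroK8s resource names patched above):

```bash
# Check that the TCP port mappings were added to the ingress ConfigMap ...
kubectl get configmap nginx-ingress-tcp-microk8s-conf --namespace ingress -o jsonpath='{.data}'; echo

# ... and that the DaemonSet now publishes the host ports.
kubectl get daemonset nginx-ingress-microk8s-controller --namespace ingress \
    -o jsonpath='{.spec.template.spec.containers[0].ports[*].hostPort}'; echo

# Prometheus and Grafana should then answer on the node itself.
curl -s "http://127.0.0.1:${PROM_EXT_PORT_HTTP:-9090}/-/ready"
curl -s "http://127.0.0.1:${GRAF_EXT_PORT_HTTP:-3000}/api/health"
```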
diff --git a/deploy/nats.sh b/deploy/nats.sh
index aa082b54ba8806c48f9b5a04c61f110b93b03d6a..b730cec4af66920e5a7d8a2235e63beff70e8694 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -53,7 +53,7 @@ function nats_deploy_single() {
         echo ">>> NATS is present; skipping step."
     else
         echo ">>> Deploy NATS"
-        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image.tag=2.9-alpine
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
 
         echo ">>> Waiting NATS statefulset to be created..."
         while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
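Note the flag change: `--set nats.image=nats:2.9-alpine` pins the full image reference instead of only the tag; whether the chart expects a plain string here or an `image.tag` sub-key depends on the nats chart version, so confirming the rolled-out image is worthwhile. A sketch, reusing the script's `${NATS_NAMESPACE}` naming:

```bash
# Confirm the NATS statefulset runs the expected image after the helm install.
NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}   # assumption: default namespace name
kubectl get statefulset/${NATS_NAMESPACE} --namespace ${NATS_NAMESPACE} \
    -o jsonpath='{.spec.template.spec.containers[0].image}'; echo
```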
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 0d0461fa89073d9a1c98e94b5fd4610e191c7a06..54db3253f4975516056d98488ffdab375541c532 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -106,6 +106,15 @@ export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kp
 export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 
 
+# ----- K8s Observability ------------------------------------------------------
+
+# If not already set, set the external port on which the Prometheus Mgmt HTTP GUI interface will be exposed.
+export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
+
+# If not already set, set the external port on which the Grafana HTTP Dashboards will be exposed.
+export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
+
+
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
@@ -241,7 +250,8 @@ for COMPONENT in $TFS_COMPONENTS; do
 
     echo "  Adapting '$COMPONENT' manifest file..."
     MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
-    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+    # cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+    cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST"
 
     if [ "$COMPONENT" == "pathcomp" ]; then
         IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
@@ -335,7 +345,7 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
     # Exposed through the ingress controller "tfs-ingress"
     GRAFANA_URL="127.0.0.1:${EXT_HTTP_PORT}/grafana"
 
-    # Default Grafana credentials
+    # Default Grafana credentials when installed with the `monitoring` addon
     GRAFANA_USERNAME="admin"
     GRAFANA_PASSWORD="admin"
 
@@ -412,25 +422,84 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
         },
         "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
     }' ${GRAFANA_URL_UPDATED}/api/datasources
+    echo
+
+    # Add the Prometheus datasource of the K8s metrics collection framework
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "prometheus",
+        "name"     : "prometheus",
+        "url"      : "http://prometheus-k8s.monitoring.svc:9090",
+        "basicAuth": false,
+        "isDefault": false,
+        "jsonData" : {
+            "httpMethod"               : "POST"
+        }
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
     printf "\n\n"
 
-    echo ">> Creating dashboards..."
+    echo ">> Creating and starring dashboards..."
     # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
+
+    # Dashboard: L3 Monitoring KPIs
     curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \
         ${GRAFANA_URL_UPDATED}/api/dashboards/db
     echo
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
 
+    # Dashboard: Slice Grouping
     curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \
         ${GRAFANA_URL_UPDATED}/api/dashboards/db
-    printf "\n\n"
+    echo
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
 
-    echo ">> Staring dashboards..."
-    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit"
+    # Dashboard: Component RPCs
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_component_rpc.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-comp-rpc"
     DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
     curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
     echo
 
-    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps"
+    # Dashboard: Device Drivers
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_device_driver.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-dev-drv"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
+    # Dashboard: Service Handlers
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_service_handler.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-svc-hdlr"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
+    # Dashboard: Device ConfigureDevice Details
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_device_config_exec_details.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-dev-confdev"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
+    # Dashboard: Load Generator Status
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_load_generator.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-loadgen-stats"
     DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
     curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
     echo
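The six dashboard blocks above repeat the same create-then-star sequence; if more dashboards are added, the pattern could be factored into a small helper. A sketch only (the helper name is hypothetical; it reuses `GRAFANA_URL_UPDATED` and the dashboard UIDs shown above):

```bash
# Hypothetical helper (not in the original script): create a Grafana dashboard
# from a JSON file, then star it for the admin user.
function create_and_star_dashboard() {
    local JSON_FILE=$1 DASHBOARD_UID=$2
    curl -X POST -H "Content-Type: application/json" -d "@${JSON_FILE}" \
        "${GRAFANA_URL_UPDATED}/api/dashboards/db"; echo
    local DASHBOARD_ID
    DASHBOARD_ID=$(curl -s "${GRAFANA_URL_UPDATED}/api/dashboards/uid/${DASHBOARD_UID}" | jq '.dashboard.id')
    curl -X POST "${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}"; echo
}

# Same effect as the expanded blocks above, e.g.:
create_and_star_dashboard src/webui/grafana_db_mon_kpis_psql.json tfs-l3-monit
create_and_star_dashboard src/webui/grafana_prom_component_rpc.json tfs-comp-rpc
```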
diff --git a/manifests/automationservice.yaml b/manifests/automationservice.yaml
deleted file mode 120000
index 5e8d3c1c82db0c03119f29865e2a7edabcdfb0eb..0000000000000000000000000000000000000000
--- a/manifests/automationservice.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../src/automation/target/kubernetes/kubernetes.yml
\ No newline at end of file
diff --git a/manifests/automationservice.yaml b/manifests/automationservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..73e6b1d7be076dbcf55014ae3accbc1e29e0c8e8
--- /dev/null
+++ b/manifests/automationservice.yaml
@@ -0,0 +1,125 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+  labels:
+    app.kubernetes.io/name: automationservice
+    app: automationservice
+  name: automationservice
+spec:
+  ports:
+    - name: grpc
+      port: 5050
+      targetPort: 5050
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 8080
+  selector:
+    app.kubernetes.io/name: automationservice
+  type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+  labels:
+    app: automationservice
+    app.kubernetes.io/name: automationservice
+  name: automationservice
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: automationservice
+  template:
+    metadata:
+      annotations:
+        app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+      labels:
+        app: automationservice
+        app.kubernetes.io/name: automationservice
+    spec:
+      containers:
+        - env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: CONTEXT_SERVICE_HOST
+              value: contextservice
+            - name: DEVICE_SERVICE_HOST
+              value: deviceservice
+          image: labs.etsi.org:5050/tfs/controller/automation:0.2.0
+          imagePullPolicy: Always
+          livenessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/live
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          name: automationservice
+          ports:
+            - containerPort: 5050
+              name: grpc
+              protocol: TCP
+            - containerPort: 8080
+              name: metrics
+              protocol: TCP
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/ready
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          resources:
+            requests:
+              cpu: 50m
+              memory: 512Mi
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: automationservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: automationservice
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
diff --git a/manifests/cockroachdb/client-secure-operator.yaml b/manifests/cockroachdb/client-secure-operator.yaml
index f7f81c8339d4ba47722a0ef2a2236178f1b9e1b0..ee3afbc5ae5feec673dc5f507f8bc794757818c7 100644
--- a/manifests/cockroachdb/client-secure-operator.yaml
+++ b/manifests/cockroachdb/client-secure-operator.yaml
@@ -1,4 +1,4 @@
-# Copyright 2022 The Cockroach Authors
+# Copyright 2023 The Cockroach Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ spec:
   serviceAccountName: cockroachdb-sa
   containers:
   - name: cockroachdb-client-secure
-    image: cockroachdb/cockroach:v22.2.0
+    image: cockroachdb/cockroach:v22.2.8
     imagePullPolicy: IfNotPresent
     volumeMounts:
     - name: client-certs
diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml
index f7444c0067cc9c2c07b53c85d765bb81d1c20c05..4d9ef0f844b5ffb02753b6cc7a7be7d03928896c 100644
--- a/manifests/cockroachdb/cluster.yaml
+++ b/manifests/cockroachdb/cluster.yaml
@@ -1,4 +1,4 @@
-# Copyright 2022 The Cockroach Authors
+# Copyright 2023 The Cockroach Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -33,16 +33,16 @@ spec:
   resources:
     requests:
       # This is intentionally low to make it work on local k3d clusters.
-      cpu: 100m
-      memory: 1Gi
-    limits:
-      cpu: 1
+      cpu: 4
       memory: 4Gi
+    limits:
+      cpu: 8
+      memory: 8Gi
   tlsEnabled: true
 # You can set either a version of the db or a specific image name
-# cockroachDBVersion: v22.2.0
+# cockroachDBVersion: v22.2.8
   image:
-    name: cockroachdb/cockroach:v22.2.0
+    name: cockroachdb/cockroach:v22.2.8
   # nodes refers to the number of crdb pods that are created
   # via the statefulset
   nodes: 3
diff --git a/manifests/cockroachdb/crds.yaml b/manifests/cockroachdb/crds.yaml
index 1b5cd89ae7001b3e200c0de7da240b660c461f3b..2ef9983924f639a82d2091907384be18e6c8c1f4 100644
--- a/manifests/cockroachdb/crds.yaml
+++ b/manifests/cockroachdb/crds.yaml
@@ -1,4 +1,4 @@
-# Copyright 2022 The Cockroach Authors
+# Copyright 2023 The Cockroach Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -354,10 +354,71 @@ spec:
                                         The requirements are ANDed.
                                       type: object
                                   type: object
+                                namespaceSelector:
+                                  description: A label query over the set of namespaces
+                                    that the term applies to. The term is applied
+                                    to the union of the namespaces selected by this
+                                    field and the ones listed in the namespaces field.
+                                    null selector and null or empty namespaces list
+                                    means "this pod's namespace". An empty selector
+                                    ({}) matches all namespaces. This field is alpha-level
+                                    and is only honored when PodAffinityNamespaceSelector
+                                    feature is enabled.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list of label
+                                        selector requirements. The requirements are
+                                        ANDed.
+                                      items:
+                                        description: A label selector requirement
+                                          is a selector that contains values, a key,
+                                          and an operator that relates the key and
+                                          values.
+                                        properties:
+                                          key:
+                                            description: key is the label key that
+                                              the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: operator represents a key's
+                                              relationship to a set of values. Valid
+                                              operators are In, NotIn, Exists and
+                                              DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: values is an array of string
+                                              values. If the operator is In or NotIn,
+                                              the values array must be non-empty.
+                                              If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This
+                                              array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: matchLabels is a map of {key,value}
+                                        pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions,
+                                        whose key field is "key", the operator is
+                                        "In", and the values array contains only "value".
+                                        The requirements are ANDed.
+                                      type: object
+                                  type: object
                                 namespaces:
-                                  description: namespaces specifies which namespaces
-                                    the labelSelector applies to (matches against);
-                                    null or empty list means "this pod's namespace"
+                                  description: namespaces specifies a static list
+                                    of namespace names that the term applies to. The
+                                    term is applied to the union of the namespaces
+                                    listed in this field and the ones selected by
+                                    namespaceSelector. null or empty namespaces list
+                                    and null namespaceSelector means "this pod's namespace"
                                   items:
                                     type: string
                                   type: array
@@ -449,10 +510,66 @@ spec:
                                     requirements are ANDed.
                                   type: object
                               type: object
+                            namespaceSelector:
+                              description: A label query over the set of namespaces
+                                that the term applies to. The term is applied to the
+                                union of the namespaces selected by this field and
+                                the ones listed in the namespaces field. null selector
+                                and null or empty namespaces list means "this pod's
+                                namespace". An empty selector ({}) matches all namespaces.
+                                This field is alpha-level and is only honored when
+                                PodAffinityNamespaceSelector feature is enabled.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: A label selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: operator represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: values is an array of string
+                                          values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the
+                                          operator is Exists or DoesNotExist, the
+                                          values array must be empty. This array is
+                                          replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: matchLabels is a map of {key,value}
+                                    pairs. A single {key,value} in the matchLabels
+                                    map is equivalent to an element of matchExpressions,
+                                    whose key field is "key", the operator is "In",
+                                    and the values array contains only "value". The
+                                    requirements are ANDed.
+                                  type: object
+                              type: object
                             namespaces:
-                              description: namespaces specifies which namespaces the
-                                labelSelector applies to (matches against); null or
-                                empty list means "this pod's namespace"
+                              description: namespaces specifies a static list of namespace
+                                names that the term applies to. The term is applied
+                                to the union of the namespaces listed in this field
+                                and the ones selected by namespaceSelector. null or
+                                empty namespaces list and null namespaceSelector means
+                                "this pod's namespace"
                               items:
                                 type: string
                               type: array
@@ -546,10 +663,71 @@ spec:
                                         The requirements are ANDed.
                                       type: object
                                   type: object
+                                namespaceSelector:
+                                  description: A label query over the set of namespaces
+                                    that the term applies to. The term is applied
+                                    to the union of the namespaces selected by this
+                                    field and the ones listed in the namespaces field.
+                                    null selector and null or empty namespaces list
+                                    means "this pod's namespace". An empty selector
+                                    ({}) matches all namespaces. This field is alpha-level
+                                    and is only honored when PodAffinityNamespaceSelector
+                                    feature is enabled.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list of label
+                                        selector requirements. The requirements are
+                                        ANDed.
+                                      items:
+                                        description: A label selector requirement
+                                          is a selector that contains values, a key,
+                                          and an operator that relates the key and
+                                          values.
+                                        properties:
+                                          key:
+                                            description: key is the label key that
+                                              the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: operator represents a key's
+                                              relationship to a set of values. Valid
+                                              operators are In, NotIn, Exists and
+                                              DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: values is an array of string
+                                              values. If the operator is In or NotIn,
+                                              the values array must be non-empty.
+                                              If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This
+                                              array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: matchLabels is a map of {key,value}
+                                        pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions,
+                                        whose key field is "key", the operator is
+                                        "In", and the values array contains only "value".
+                                        The requirements are ANDed.
+                                      type: object
+                                  type: object
                                 namespaces:
-                                  description: namespaces specifies which namespaces
-                                    the labelSelector applies to (matches against);
-                                    null or empty list means "this pod's namespace"
+                                  description: namespaces specifies a static list
+                                    of namespace names that the term applies to. The
+                                    term is applied to the union of the namespaces
+                                    listed in this field and the ones selected by
+                                    namespaceSelector. null or empty namespaces list
+                                    and null namespaceSelector means "this pod's namespace"
                                   items:
                                     type: string
                                   type: array
@@ -641,10 +819,66 @@ spec:
                                     requirements are ANDed.
                                   type: object
                               type: object
+                            namespaceSelector:
+                              description: A label query over the set of namespaces
+                                that the term applies to. The term is applied to the
+                                union of the namespaces selected by this field and
+                                the ones listed in the namespaces field. null selector
+                                and null or empty namespaces list means "this pod's
+                                namespace". An empty selector ({}) matches all namespaces.
+                                This field is alpha-level and is only honored when
+                                PodAffinityNamespaceSelector feature is enabled.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: A label selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: operator represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: values is an array of string
+                                          values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the
+                                          operator is Exists or DoesNotExist, the
+                                          values array must be empty. This array is
+                                          replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: matchLabels is a map of {key,value}
+                                    pairs. A single {key,value} in the matchLabels
+                                    map is equivalent to an element of matchExpressions,
+                                    whose key field is "key", the operator is "In",
+                                    and the values array contains only "value". The
+                                    requirements are ANDed.
+                                  type: object
+                              type: object
                             namespaces:
-                              description: namespaces specifies which namespaces the
-                                labelSelector applies to (matches against); null or
-                                empty list means "this pod's namespace"
+                              description: namespaces specifies a static list of namespace
+                                names that the term applies to. The term is applied
+                                to the union of the namespaces listed in this field
+                                and the ones selected by namespaceSelector. null or
+                                empty namespaces list and null namespaceSelector means
+                                "this pod's namespace"
                               items:
                                 type: string
                               type: array
@@ -767,7 +1001,7 @@ spec:
                                   pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                                   x-kubernetes-int-or-string: true
                                 description: 'Limits describes the maximum amount
-                                  of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                                  of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                                 type: object
                               requests:
                                 additionalProperties:
@@ -780,7 +1014,7 @@ spec:
                                   of compute resources required. If Requests is omitted
                                   for a container, it defaults to Limits if that is
                                   explicitly specified, otherwise to an implementation-defined
-                                  value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                                  value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                                 type: object
                             type: object
                           selector:
@@ -1138,7 +1372,7 @@ spec:
                       pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                       x-kubernetes-int-or-string: true
                     description: 'Limits describes the maximum amount of compute resources
-                      allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                      allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                     type: object
                   requests:
                     additionalProperties:
@@ -1150,7 +1384,7 @@ spec:
                     description: 'Requests describes the minimum amount of compute
                       resources required. If Requests is omitted for a container,
                       it defaults to Limits if that is explicitly specified, otherwise
-                      to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                      to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                     type: object
                 type: object
               sqlPort:
diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml
index 2be72d329b48bc6f45d66f811c299140cda85e27..59d515061c4c0f253523aab803653b3f33007461 100644
--- a/manifests/cockroachdb/operator.yaml
+++ b/manifests/cockroachdb/operator.yaml
@@ -1,4 +1,4 @@
-# Copyright 2022 The Cockroach Authors
+# Copyright 2023 The Cockroach Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -538,8 +538,34 @@ spec:
           value: cockroachdb/cockroach:v22.1.11
         - name: RELATED_IMAGE_COCKROACH_v22_1_12
           value: cockroachdb/cockroach:v22.1.12
+        - name: RELATED_IMAGE_COCKROACH_v22_1_13
+          value: cockroachdb/cockroach:v22.1.13
+        - name: RELATED_IMAGE_COCKROACH_v22_1_14
+          value: cockroachdb/cockroach:v22.1.14
+        - name: RELATED_IMAGE_COCKROACH_v22_1_15
+          value: cockroachdb/cockroach:v22.1.15
+        - name: RELATED_IMAGE_COCKROACH_v22_1_16
+          value: cockroachdb/cockroach:v22.1.16
+        - name: RELATED_IMAGE_COCKROACH_v22_1_18
+          value: cockroachdb/cockroach:v22.1.18
         - name: RELATED_IMAGE_COCKROACH_v22_2_0
           value: cockroachdb/cockroach:v22.2.0
+        - name: RELATED_IMAGE_COCKROACH_v22_2_1
+          value: cockroachdb/cockroach:v22.2.1
+        - name: RELATED_IMAGE_COCKROACH_v22_2_2
+          value: cockroachdb/cockroach:v22.2.2
+        - name: RELATED_IMAGE_COCKROACH_v22_2_3
+          value: cockroachdb/cockroach:v22.2.3
+        - name: RELATED_IMAGE_COCKROACH_v22_2_4
+          value: cockroachdb/cockroach:v22.2.4
+        - name: RELATED_IMAGE_COCKROACH_v22_2_5
+          value: cockroachdb/cockroach:v22.2.5
+        - name: RELATED_IMAGE_COCKROACH_v22_2_6
+          value: cockroachdb/cockroach:v22.2.6
+        - name: RELATED_IMAGE_COCKROACH_v22_2_7
+          value: cockroachdb/cockroach:v22.2.7
+        - name: RELATED_IMAGE_COCKROACH_v22_2_8
+          value: cockroachdb/cockroach:v22.2.8
         - name: OPERATOR_NAME
           value: cockroachdb
         - name: WATCH_NAMESPACE
@@ -552,7 +578,7 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
-        image: cockroachdb/cockroach-operator:v2.9.0
+        image: cockroachdb/cockroach-operator:v2.10.0
         imagePullPolicy: IfNotPresent
         name: cockroach-operator
         resources:
diff --git a/manifests/cockroachdb/pre_operator.yaml b/manifests/cockroachdb/pre_operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16718a77918491170502a5cbb864a6fda39c734a
--- /dev/null
+++ b/manifests/cockroachdb/pre_operator.yaml
@@ -0,0 +1,19 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: cockroach-operator
+  name: cockroach-operator-system
diff --git a/manifests/cockroachdb/single-node.yaml b/manifests/cockroachdb/single-node.yaml
index 72454a0904fa70b6b4062dae8ef7e2e5d8625648..20d7e18c69a9eb75bcb70c9123455926d41eebc6 100644
--- a/manifests/cockroachdb/single-node.yaml
+++ b/manifests/cockroachdb/single-node.yaml
@@ -61,6 +61,7 @@ spec:
       containers:
       - name: cockroachdb
         image: cockroachdb/cockroach:latest-v22.2
+        imagePullPolicy: Always
         args:
         - start-single-node
         ports:
diff --git a/manifests/computeservice.yaml b/manifests/computeservice.yaml
index 7e40ef988bc7dcb77960b224dfe5626ee95cfdfb..3e3b041ab20968ad0010eb06f7900faa7b649dc9 100644
--- a/manifests/computeservice.yaml
+++ b/manifests/computeservice.yaml
@@ -20,6 +20,7 @@ spec:
   selector:
     matchLabels:
       app: computeservice
+  replicas: 1
   template:
     metadata:
       labels:
@@ -33,6 +34,7 @@ spec:
         ports:
         - containerPort: 8080
         - containerPort: 9090
+        - containerPort: 9192
         env:
         - name: LOG_LEVEL
           value: "INFO"
@@ -44,16 +46,18 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:9090"]
         resources:
           requests:
-            cpu: 250m
-            memory: 512Mi
+            cpu: 50m
+            memory: 64Mi
           limits:
-            cpu: 700m
-            memory: 1024Mi
+            cpu: 500m
+            memory: 512Mi
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: computeservice
+  labels:
+    app: computeservice
 spec:
   type: ClusterIP
   selector:
@@ -67,3 +71,7 @@ spec:
     protocol: TCP
     port: 9090
     targetPort: 9090
+  - name: metrics
+    protocol: TCP
+    port: 9192
+    targetPort: 9192
diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml
index b1e6eb89dc4ec92409dbd05bbe668987ea93828f..96735bf5f89f682f31131c123ee9884a1becbfdb 100644
--- a/manifests/contextservice.yaml
+++ b/manifests/contextservice.yaml
@@ -20,9 +20,11 @@ spec:
   selector:
     matchLabels:
       app: contextservice
-  replicas: 1
+  #replicas: 1
   template:
     metadata:
+      annotations:
+        config.linkerd.io/skip-outbound-ports: "4222"
       labels:
         app: contextservice
     spec:
@@ -52,11 +54,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:1010"]
         resources:
           requests:
-            cpu: 50m
-            memory: 64Mi
+            cpu: 250m
+            memory: 128Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 1000m
+            memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
@@ -77,3 +79,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: contextservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: contextservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
index ca2c81f0f2e5d874066464ab0537adeec734cfbb..f9a6d987d18bb3d994538c85b2ec14024553b45b 100644
--- a/manifests/deviceservice.yaml
+++ b/manifests/deviceservice.yaml
@@ -45,11 +45,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:2020"]
         resources:
           requests:
-            cpu: 50m
-            memory: 64Mi
+            cpu: 250m
+            memory: 128Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 1000m
+            memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/load_generatorservice.yaml b/manifests/load_generatorservice.yaml
index b94e11e725757fa2ec67de19f98ecfa6a03f085b..7cc6f19122573a612ddca774c3a785bff93f8b38 100644
--- a/manifests/load_generatorservice.yaml
+++ b/manifests/load_generatorservice.yaml
@@ -33,6 +33,7 @@ spec:
         imagePullPolicy: Always
         ports:
         - containerPort: 50052
+        - containerPort: 9192
         env:
         - name: LOG_LEVEL
           value: "INFO"
@@ -44,11 +45,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:50052"]
         resources:
           requests:
-            cpu: 50m
+            cpu: 256m
             memory: 64Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 512m
+            memory: 128Mi
 ---
 apiVersion: v1
 kind: Service
@@ -65,3 +66,7 @@ spec:
     protocol: TCP
     port: 50052
     targetPort: 50052
+  - name: metrics
+    protocol: TCP
+    port: 9192
+    targetPort: 9192
diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml
index fd3599f429f48ebb3cf3f8d802f8f61f00e1b41d..3ba12750b20a7093a570748e67a93922316a66f6 100644
--- a/manifests/pathcompservice.yaml
+++ b/manifests/pathcompservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: pathcompservice
-  replicas: 1
+  #replicas: 1
   template:
     metadata:
       labels:
@@ -53,6 +53,8 @@ spec:
       - name: backend
         image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest
         imagePullPolicy: Always
+        ports:
+        - containerPort: 8081
         #readinessProbe:
         #  httpGet:
         #    path: /health
@@ -96,3 +98,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: pathcompservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: pathcompservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/policyservice.yaml b/manifests/policyservice.yaml
deleted file mode 120000
index bb28f6e2cff4c6b50e44f049dec6a53d31922e86..0000000000000000000000000000000000000000
--- a/manifests/policyservice.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../src/policy/target/kubernetes/kubernetes.yml
\ No newline at end of file
diff --git a/manifests/policyservice.yaml b/manifests/policyservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..72da09ecaf1de9d080d686c63c0f18c88f09e8b4
--- /dev/null
+++ b/manifests/policyservice.yaml
@@ -0,0 +1,129 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+    app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
+  labels:
+    app.kubernetes.io/name: policyservice
+    app: policyservice
+  name: policyservice
+spec:
+  ports:
+    - name: metrics
+      port: 9192
+      targetPort: 8080
+    - name: grpc
+      port: 6060
+      targetPort: 6060
+  selector:
+    app.kubernetes.io/name: policyservice
+  type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+    app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
+  labels:
+    app: policyservice
+    app.kubernetes.io/name: policyservice
+  name: policyservice
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: policyservice
+  template:
+    metadata:
+      annotations:
+        app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+        app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
+      labels:
+        app: policyservice
+        app.kubernetes.io/name: policyservice
+    spec:
+      containers:
+        - env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: SERVICE_SERVICE_HOST
+              value: serviceservice
+            - name: CONTEXT_SERVICE_HOST
+              value: contextservice
+            - name: MONITORING_SERVICE_HOST
+              value: monitoringservice
+          image: labs.etsi.org:5050/tfs/controller/policy:0.1.0
+          imagePullPolicy: Always
+          livenessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/live
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          name: policyservice
+          ports:
+            - containerPort: 8080
+              name: metrics
+              protocol: TCP
+            - containerPort: 6060
+              name: grpc-server
+              protocol: TCP
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/ready
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          resources:
+            requests:
+              cpu: 50m
+              memory: 512Mi
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: policyservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: policyservice
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
\ No newline at end of file
diff --git a/manifests/servicemonitors.yaml b/manifests/servicemonitors.yaml
index 06c3390f4fddbcb6f8adec5d931989cc8a41cc68..ec929f757cdf5468a7db7a7c1f1e755611d5327b 100644
--- a/manifests/servicemonitors.yaml
+++ b/manifests/servicemonitors.yaml
@@ -243,3 +243,119 @@ spec:
     any: false
     matchNames:
     - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-policyservice-metric
+  labels:
+    app: policyservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # (VERY IMPORTANT: you need to know the correct release name by viewing
+    #  the ServiceMonitor of Prometheus itself; without the correct name,
+    #  Prometheus cannot identify the metrics of this service as the target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: policyservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /q/metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-automationservice-metric
+  labels:
+    app: automationservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # (VERY IMPORTANT: you need to know the correct release name by viewing
+    #  the ServiceMonitor of Prometheus itself; without the correct name,
+    #  Prometheus cannot identify the metrics of this service as the target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: automationservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /q/metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-computeservice-metric
+  labels:
+    app: computeservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # (VERY IMPORTANT: find the correct release name by inspecting the
+    #  ServiceMonitor of Prometheus itself; without it, Prometheus cannot
+    #  identify this app's metrics endpoint as a scrape target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: computeservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-load-generatorservice-metric
+  labels:
+    app: load-generatorservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # (VERY IMPORTANT: find the correct release name by inspecting the
+    #  ServiceMonitor of Prometheus itself; without it, Prometheus cannot
+    #  identify this app's metrics endpoint as a scrape target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: load-generatorservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml
index 3fa4a6e0dc256ba964fd4ee26a8b7095bb2303f4..7d7bdaa4ef9ad4972da6236071810c63a9faa4f8 100644
--- a/manifests/serviceservice.yaml
+++ b/manifests/serviceservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: serviceservice
-  replicas: 1
+  #replicas: 1
   template:
     metadata:
       labels:
@@ -45,11 +45,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:3030"]
         resources:
           requests:
-            cpu: 50m
-            memory: 64Mi
+            cpu: 250m
+            memory: 128Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 1000m
+            memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
@@ -70,3 +70,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: serviceservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: serviceservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
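+  # Note (K8s default): scale-down waits out a 300s stabilization window;
+  # uncommenting the behavior block above shortens it to 30s for faster
+  # scale-in during experiments.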
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index 49e2b5943d20586941f80e8fc4b5c32c99d70f8e..e7e5c1604a8b971424ff5f7e5bf292c4b263cbfe 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: sliceservice
-  replicas: 1
+  #replicas: 1
   template:
     metadata:
       labels:
@@ -50,11 +50,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:4040"]
         resources:
           requests:
-            cpu: 50m
-            memory: 64Mi
+            cpu: 250m
+            memory: 128Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 1000m
+            memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
@@ -75,3 +75,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: sliceservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: sliceservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index f25dbf6e501775d56f266699b1474429b42b2015..b6ddfc0a91ae5316969079c517e148f63fb18b61 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -20,6 +20,7 @@ spec:
   selector:
     matchLabels:
       app: webuiservice
+  replicas: 1
   template:
     metadata:
       labels:
@@ -55,13 +56,13 @@ spec:
           timeoutSeconds: 1
         resources:
           requests:
-            cpu: 100m
-            memory: 512Mi
+            cpu: 50m
+            memory: 64Mi
           limits:
-            cpu: 700m
-            memory: 1024Mi
+            cpu: 500m
+            memory: 512Mi
       - name: grafana
-        image: grafana/grafana:8.5.11
+        image: grafana/grafana:8.5.22
         imagePullPolicy: IfNotPresent
         ports:
           - containerPort: 3000
@@ -92,16 +93,18 @@ spec:
           timeoutSeconds: 1
         resources:
           requests:
-            cpu: 250m
-            memory: 750Mi
+            cpu: 150m
+            memory: 512Mi
           limits:
-            cpu: 700m
+            cpu: 500m
             memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: webuiservice
+  labels:
+    app: webuiservice
 spec:
   type: ClusterIP
   selector:
diff --git a/my_deploy.sh b/my_deploy.sh
index 33694b76c7b3a5718b923274fe06bd3a56038dbc..ee3244ac99d5a2e8d5dba5a6ccd1609b0012c06b 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -29,7 +29,7 @@ export TFS_IMAGE_TAG="dev"
 export TFS_K8S_NAMESPACE="tfs"
 
 # Set additional manifest files to be applied after the deployment
-export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml manifests/servicemonitors.yaml"
 
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
@@ -115,3 +115,12 @@ export QDB_DROP_TABLES_IF_EXIST=""
 
 # Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
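+
+# Quick sanity check after deployment (assumes deploy/expose_dashboard.sh
+# publishes Grafana through the "grafana" Service in namespace "monitoring"):
+#   kubectl -n monitoring get service grafana -o wide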
diff --git a/ofc23 b/ofc23
new file mode 120000
index 0000000000000000000000000000000000000000..a1135d4c59a81997350864319a6c267eaaf9ed93
--- /dev/null
+++ b/ofc23
@@ -0,0 +1 @@
+src/tests/ofc23/
\ No newline at end of file
diff --git a/proto/context.proto b/proto/context.proto
index 49d16229cdac5de84f25cfaa7d196d25184f46f0..3b25e6361766ee4c2b52e15aab215409f40cbb56 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -40,7 +40,7 @@ service ContextService {
   rpc SetDevice          (Device        ) returns (       DeviceId        ) {}
   rpc RemoveDevice       (DeviceId      ) returns (       Empty           ) {}
   rpc GetDeviceEvents    (Empty         ) returns (stream DeviceEvent     ) {}
-
+  rpc SelectDevice       (DeviceFilter  ) returns (       DeviceList      ) {}
   rpc ListEndPointNames  (EndPointIdList) returns (       EndPointNameList) {}
 
   rpc ListLinkIds        (Empty         ) returns (       LinkIdList      ) {}
@@ -57,6 +57,7 @@ service ContextService {
   rpc UnsetService       (Service       ) returns (       ServiceId       ) {}
   rpc RemoveService      (ServiceId     ) returns (       Empty           ) {}
   rpc GetServiceEvents   (Empty         ) returns (stream ServiceEvent    ) {}
+  rpc SelectService      (ServiceFilter ) returns (       ServiceList     ) {}
 
   rpc ListSliceIds       (ContextId     ) returns (       SliceIdList     ) {}
   rpc ListSlices         (ContextId     ) returns (       SliceList       ) {}
@@ -65,6 +66,7 @@ service ContextService {
   rpc UnsetSlice         (Slice         ) returns (       SliceId         ) {}
   rpc RemoveSlice        (SliceId       ) returns (       Empty           ) {}
   rpc GetSliceEvents     (Empty         ) returns (stream SliceEvent      ) {}
+  rpc SelectSlice        (SliceFilter   ) returns (       SliceList       ) {}
 
   rpc ListConnectionIds  (ServiceId     ) returns (       ConnectionIdList) {}
   rpc ListConnections    (ServiceId     ) returns (       ConnectionList  ) {}
@@ -191,6 +193,7 @@ enum DeviceDriverEnum {
   DEVICEDRIVER_IETF_NETWORK_TOPOLOGY = 4;
   DEVICEDRIVER_ONF_TR_352 = 5;
   DEVICEDRIVER_XR = 6;
+  DEVICEDRIVER_IETF_L2VPN = 7;
 }
 
 enum DeviceOperationalStatusEnum {
@@ -207,6 +210,13 @@ message DeviceList {
   repeated Device devices = 1;
 }
 
+message DeviceFilter {
+  DeviceIdList device_ids = 1;
+  bool include_endpoints = 2;
+  bool include_config_rules = 3;
+  bool include_components = 4;
+}
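+// The include_* flags above (assumed semantics, mirrored by ServiceFilter and
+// SliceFilter below) control which sub-objects SelectDevice populates in the
+// returned DeviceList, so callers can skip bulky config-rule payloads.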
+
 message DeviceEvent {
   Event event = 1;
   DeviceId device_id = 2;
@@ -287,6 +297,13 @@ message ServiceList {
   repeated Service services = 1;
 }
 
+message ServiceFilter {
+  ServiceIdList service_ids = 1;
+  bool include_endpoint_ids = 2;
+  bool include_constraints = 3;
+  bool include_config_rules = 4;
+}
+
 message ServiceEvent {
   Event event = 1;
   ServiceId service_id = 2;
@@ -341,6 +358,15 @@ message SliceList {
   repeated Slice slices = 1;
 }
 
+message SliceFilter {
+  SliceIdList slice_ids = 1;
+  bool include_endpoint_ids = 2;
+  bool include_constraints = 3;
+  bool include_service_ids = 4;
+  bool include_subslice_ids = 5;
+  bool include_config_rules = 6;
+}
+
 message SliceEvent {
   Event event = 1;
   SliceId slice_id = 2;
diff --git a/proto/load_generator.proto b/proto/load_generator.proto
index 86f9469588f1586da5339edad198e39e82598cde..7d0070c66f1104d9903950fb8b59f64e3ec42f71 100644
--- a/proto/load_generator.proto
+++ b/proto/load_generator.proto
@@ -33,21 +33,38 @@ enum RequestTypeEnum {
   REQUESTTYPE_SLICE_L3NM   = 6;
 }
 
+message Range {
+  float minimum = 1;
+  float maximum = 2;
+}
+
+message ScalarOrRange {
+  oneof value {
+    float scalar = 1; // select the scalar value
+    Range range = 2;  // select a random value uniformly distributed between minimum and maximum
+  }
+}
+
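+// Illustrative text-format snippet for the Parameters fields below (values are
+// examples, not defaults): draw availability uniformly from [99.0, 99.99] and
+// use a fixed 100 Gb/s capacity:
+//   availability  { range  { minimum: 99.0 maximum: 99.99 } }
+//   capacity_gbps { scalar: 100.0 }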
 message Parameters {
   uint64 num_requests = 1;  // if == 0, generate infinite requests
   repeated RequestTypeEnum request_types = 2;
   float offered_load = 3;
   float holding_time = 4;
   float inter_arrival_time = 5;
-  bool do_teardown = 6;
-  bool dry_mode = 7;
-  bool record_to_dlt = 8;
-  string dlt_domain_id = 9;
+  repeated ScalarOrRange availability = 6;    // one from the list is selected
+  repeated ScalarOrRange capacity_gbps = 7;   // one from the list is selected
+  repeated ScalarOrRange e2e_latency_ms = 8;  // one from the list is selected
+  uint32 max_workers = 9;
+  bool do_teardown = 10;
+  bool dry_mode = 11;
+  bool record_to_dlt = 12;
+  string dlt_domain_id = 13;
 }
 
 message Status {
   Parameters parameters = 1;
   uint64 num_generated = 2;
-  bool infinite_loop = 3;
-  bool running = 4;
+  uint64 num_released = 3;
+  bool infinite_loop = 4;
+  bool running = 5;
 }
diff --git a/scripts/old/open_dashboard.sh b/scripts/old/open_dashboard.sh
old mode 100755
new mode 100644
index 4ea206f4538c27fe8563ce5c30ed837781f8d362..2ff15684a499fe390816ebb8e4859cad49d43d32
--- a/scripts/old/open_dashboard.sh
+++ b/scripts/old/open_dashboard.sh
@@ -16,9 +16,7 @@
 
 # this script opens the dashboard
 
-K8S_NAMESPACE=${K8S_NAMESPACE:-'tfs'}
-
-GRAFANA_IP=$(kubectl get service/webuiservice -n ${TFS_K8S_NAMESPACE} -o jsonpath='{.spec.clusterIP}')
+GRAFANA_IP=$(kubectl get service/grafana -n monitoring -o jsonpath='{.spec.clusterIP}')
 GRAFANA_PORT=3000 #$(kubectl get service webuiservice --namespace $TFS_K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==3000)].nodePort}')
 URL=http://${GRAFANA_IP}:${GRAFANA_PORT}
 
diff --git a/scripts/show_logs_compute.sh b/scripts/show_logs_compute.sh
index fc992eb43e5872b4522db6f5c8ce39207f12d559..f0c24b63aa7b7e5c6678659c34dee34e8ce5b49e 100755
--- a/scripts/show_logs_compute.sh
+++ b/scripts/show_logs_compute.sh
@@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 # Automated steps start here
 ########################################################################################################################
 
-kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/computeservice
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/computeservice -c server
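+# -c picks the "server" container explicitly; kubectl logs requires a container
+# name when the pod runs more than one container (presumably the case here).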
diff --git a/scripts/show_logs_device.sh b/scripts/show_logs_device.sh
index 6a77c38152716f1e6fbf320671dda25d974431c8..e643f563a6b8ba250985b013cecc9340c53c9411 100755
--- a/scripts/show_logs_device.sh
+++ b/scripts/show_logs_device.sh
@@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 # Automated steps start here
 ########################################################################################################################
 
-kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/deviceservice
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/deviceservice -c server
diff --git a/scripts/show_logs_load_generator.sh b/scripts/show_logs_load_generator.sh
index d0f2527d74840d48a10e0ec7ba018f513eea2c52..51438f181f5492a1c9c9bc8dd0b5a76f6db1046c 100755
--- a/scripts/show_logs_load_generator.sh
+++ b/scripts/show_logs_load_generator.sh
@@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 # Automated steps start here
 ########################################################################################################################
 
-kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/load-generatorservice
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/load-generatorservice -c server
diff --git a/scripts/show_logs_monitoring.sh b/scripts/show_logs_monitoring.sh
index 1a152a32216545f53607880c3908266f4ac41e95..61b0b5cc024f89daeffc0745c2689d85500f4115 100755
--- a/scripts/show_logs_monitoring.sh
+++ b/scripts/show_logs_monitoring.sh
@@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 # Automated steps start here
 ########################################################################################################################
 
-kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice server
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice -c server
diff --git a/scripts/show_logs_service.sh b/scripts/show_logs_service.sh
index 7ca1c1c2f4286a5fc46f7d36197d376472b447ed..cc75e19c64935f99c7919f9371717b91b0e6b3cb 100755
--- a/scripts/show_logs_service.sh
+++ b/scripts/show_logs_service.sh
@@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 # Automated steps start here
 ########################################################################################################################
 
-kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/serviceservice
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/serviceservice -c server
diff --git a/scripts/show_logs_slice.sh b/scripts/show_logs_slice.sh
index c71bc92eaa4a8d411372fc0ad4194881a5a2a9c8..7fa8091cce081ff7cef152f465bea8e426b40124 100755
--- a/scripts/show_logs_slice.sh
+++ b/scripts/show_logs_slice.sh
@@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 # Automated steps start here
 ########################################################################################################################
 
-kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/sliceservice
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/sliceservice -c server
diff --git a/src/automation/pom.xml b/src/automation/pom.xml
index 2fd5fd263a698145f39c37ed358982de58dfee77..7dfc3dac438fa5df740381be0ef595a5734d7699 100644
--- a/src/automation/pom.xml
+++ b/src/automation/pom.xml
@@ -174,6 +174,11 @@
       <scope>test</scope>
     </dependency>
 
+    <dependency>
+      <groupId>io.quarkus</groupId>
+      <artifactId>quarkus-smallrye-metrics</artifactId>
+    </dependency>
+
   </dependencies>
 
   <build>
diff --git a/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java b/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java
index 51857bb3dba6422fe6ffc93930e0e2bf65b1a223..2f9054cd8296579b3e391aae84ec16ad1f460bdb 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java
@@ -27,6 +27,10 @@ import io.quarkus.grpc.GrpcService;
 import io.smallrye.mutiny.Uni;
 import javax.inject.Inject;
 
+import org.eclipse.microprofile.metrics.MetricUnits;
+import org.eclipse.microprofile.metrics.annotation.Counted;
+import org.eclipse.microprofile.metrics.annotation.Timed;
+
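+// With the quarkus-smallrye-metrics dependency added in pom.xml, the @Counted and
+// @Timed annotations below are exported in Prometheus format at /q/metrics, the
+// same path the new tfs-automationservice-metric ServiceMonitor scrapes.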
 @GrpcService
 public class AutomationGatewayImpl implements AutomationGateway {
 
@@ -40,18 +44,24 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpGetDeviceRole_counter")
+    @Timed(name = "automation_ztpGetDeviceRole_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRole> ztpGetDeviceRole(Automation.DeviceRoleId request) {
         return Uni.createFrom()
                 .item(() -> Automation.DeviceRole.newBuilder().setDevRoleId(request).build());
     }
 
     @Override
+    @Counted(name = "automation_ztpGetDeviceRolesByDeviceId_counter")
+    @Timed(name = "automation_ztpGetDeviceRolesByDeviceId_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRoleList> ztpGetDeviceRolesByDeviceId(
             ContextOuterClass.DeviceId request) {
         return Uni.createFrom().item(() -> Automation.DeviceRoleList.newBuilder().build());
     }
 
     @Override
+    @Counted(name = "automation_ztpAdd_counter")
+    @Timed(name = "automation_ztpAdd_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRoleState> ztpAdd(Automation.DeviceRole request) {
         final var devRoleId = request.getDevRoleId().getDevRoleId().getUuid();
         final var deviceId = serializer.deserialize(request.getDevRoleId().getDevId());
@@ -63,6 +73,8 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpUpdate_counter")
+    @Timed(name = "automation_ztpUpdate_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<DeviceRoleState> ztpUpdate(DeviceRoleConfig request) {
         final var devRoleId = request.getDevRole().getDevRoleId().getDevRoleId().getUuid();
         final var deviceId = serializer.deserialize(request.getDevRole().getDevRoleId().getDevId());
@@ -75,6 +87,8 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpDelete_counter")
+    @Timed(name = "automation_ztpDelete_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRoleState> ztpDelete(Automation.DeviceRole request) {
         final var devRoleId = request.getDevRoleId().getDevRoleId().getUuid();
         return automationService
@@ -84,6 +98,8 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpDeleteAll_counter")
+    @Timed(name = "automation_ztpDeleteAll_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceDeletionResult> ztpDeleteAll(ContextOuterClass.Empty empty) {
         return Uni.createFrom().item(() -> Automation.DeviceDeletionResult.newBuilder().build());
     }
diff --git a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java
index 08691b5266b8172a2bd0449df870033bd2664dd0..b0729aa55b25da030f9722330e22a0976a3d007f 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java
@@ -853,6 +853,8 @@ public class Serializer {
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352;
             case XR:
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR;
+            case IETF_L2VPN:
+                return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN;
             case UNDEFINED:
             default:
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_UNDEFINED;
@@ -874,6 +876,8 @@ public class Serializer {
                 return DeviceDriverEnum.ONF_TR_352;
             case DEVICEDRIVER_XR:
                 return DeviceDriverEnum.XR;
+            case DEVICEDRIVER_IETF_L2VPN:
+                return DeviceDriverEnum.IETF_L2VPN;
             case DEVICEDRIVER_UNDEFINED:
             case UNRECOGNIZED:
             default:
diff --git a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java
index a364bb0e3cb821d20061d574428984acacb0cc46..3a26937e79d0df2cfead305a10ccadf3c54eae89 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java
@@ -23,5 +23,6 @@ public enum DeviceDriverEnum {
     P4,
     IETF_NETWORK_TOPOLOGY,
     ONF_TR_352,
-    XR
+    XR,
+    IETF_L2VPN
 }
diff --git a/src/automation/src/main/resources/application.yml b/src/automation/src/main/resources/application.yml
index f7b767e98f55556d21910b649fdfda2d9a8f94ec..bf638039daf3460c2f4ef374a380b37d01de1f1c 100644
--- a/src/automation/src/main/resources/application.yml
+++ b/src/automation/src/main/resources/application.yml
@@ -17,7 +17,7 @@ automation:
 quarkus:
   banner:
     path: teraflow-automation-banner.txt
   grpc:
     server:
       port: 5050
       enable-reflection-service: true
@@ -36,6 +36,7 @@ quarkus:
     group: tfs
     name: controller/automation
     registry: labs.etsi.org:5050
+    tag: 0.2.0
 
   kubernetes:
     name: automationservice
@@ -51,12 +52,16 @@ quarkus:
       period: 10s
     ports:
       http:
-        host-port: 8080
+        host-port: 9192
         container-port: 8080
-      grpc:
-        host-port: 5050
-        container-port: 5050
     env:
       vars:
         context-service-host: "contextservice"
         device-service-host: "deviceservice"
+    resources:
+      requests:
+        cpu: 50m
+        memory: 512Mi
+      limits:
+        cpu: 500m
+        memory: 2048Mi
diff --git a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java
index 494e608a105897fe005a18b7041b34fe95b40f8b..0931054c682dede502fb9f22bf911439e52c2140 100644
--- a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java
+++ b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java
@@ -1215,6 +1215,8 @@ class SerializerTest {
                         DeviceDriverEnum.ONF_TR_352,
                         ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352),
                 Arguments.of(DeviceDriverEnum.XR, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR),
+                Arguments.of(
+                        DeviceDriverEnum.IETF_L2VPN, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN),
                 Arguments.of(
                         DeviceDriverEnum.UNDEFINED, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_UNDEFINED));
     }
diff --git a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java
index 060e81a556893b1ca3c60928569983506bff3672..b1bccdeccf564b0d8d7bd2a8606f614b00ede972 100644
--- a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java
+++ b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java
@@ -177,6 +177,10 @@ public final class ContextOuterClass {
      * <code>DEVICEDRIVER_XR = 6;</code>
      */
     DEVICEDRIVER_XR(6),
+    /**
+     * <code>DEVICEDRIVER_IETF_L2VPN = 7;</code>
+     */
+    DEVICEDRIVER_IETF_L2VPN(7),
     UNRECOGNIZED(-1),
     ;
 
@@ -212,6 +216,10 @@ public final class ContextOuterClass {
      * <code>DEVICEDRIVER_XR = 6;</code>
      */
     public static final int DEVICEDRIVER_XR_VALUE = 6;
+    /**
+     * <code>DEVICEDRIVER_IETF_L2VPN = 7;</code>
+     */
+    public static final int DEVICEDRIVER_IETF_L2VPN_VALUE = 7;
 
 
     public final int getNumber() {
@@ -245,6 +253,7 @@ public final class ContextOuterClass {
         case 4: return DEVICEDRIVER_IETF_NETWORK_TOPOLOGY;
         case 5: return DEVICEDRIVER_ONF_TR_352;
         case 6: return DEVICEDRIVER_XR;
+        case 7: return DEVICEDRIVER_IETF_L2VPN;
         default: return null;
       }
     }
diff --git a/src/automation/target/kubernetes/kubernetes.yml b/src/automation/target/kubernetes/kubernetes.yml
index 4dacf3998c3991a441dc374ca6c6abc29e8d3b80..7aa68a257eeda04d6101f05b291882c274c43f86 100644
--- a/src/automation/target/kubernetes/kubernetes.yml
+++ b/src/automation/target/kubernetes/kubernetes.yml
@@ -4,32 +4,36 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 ---
 apiVersion: v1
 kind: Service
 metadata:
   annotations:
-    app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+    app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+    app.quarkus.io/build-timestamp: 2023-04-04 - 11:47:48 +0000
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /q/metrics
+    prometheus.io/port: "8080"
+    prometheus.io/scheme: http
   labels:
     app.kubernetes.io/name: automationservice
     app: automationservice
   name: automationservice
 spec:
   ports:
-    - name: grpc
-      port: 5050
-      targetPort: 5050
     - name: http
-      port: 8080
+      port: 9192
       targetPort: 8080
+    - name: grpc-server
+      port: 5050
+      targetPort: 5050
   selector:
     app.kubernetes.io/name: automationservice
   type: ClusterIP
@@ -38,7 +42,12 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   annotations:
-    app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+    app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+    app.quarkus.io/build-timestamp: 2023-04-04 - 11:47:48 +0000
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /q/metrics
+    prometheus.io/port: "8080"
+    prometheus.io/scheme: http
   labels:
     app: automationservice
     app.kubernetes.io/name: automationservice
@@ -51,7 +60,12 @@ spec:
   template:
     metadata:
       annotations:
-        app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+        app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+        app.quarkus.io/build-timestamp: 2023-04-04 - 11:47:48 +0000
+        prometheus.io/scrape: "true"
+        prometheus.io/path: /q/metrics
+        prometheus.io/port: "8080"
+        prometheus.io/scheme: http
       labels:
         app: automationservice
         app.kubernetes.io/name: automationservice
@@ -80,12 +94,12 @@ spec:
             timeoutSeconds: 10
           name: automationservice
           ports:
-            - containerPort: 5050
-              name: grpc
-              protocol: TCP
             - containerPort: 8080
               name: http
               protocol: TCP
+            - containerPort: 5050
+              name: grpc-server
+              protocol: TCP
           readinessProbe:
             failureThreshold: 3
             httpGet:
@@ -96,3 +110,10 @@ spec:
             periodSeconds: 10
             successThreshold: 1
             timeoutSeconds: 10
+          resources:
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+            requests:
+              cpu: 50m
+              memory: 512Mi
diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py
index 99255defdb6b5ee155607536a2e13d23b97b2d3a..bb8948585f163aeb84ee758b8581bc6509d29799 100644
--- a/src/common/DeviceTypes.py
+++ b/src/common/DeviceTypes.py
@@ -25,9 +25,12 @@ class DeviceTypeEnum(Enum):
     EMULATED_OPEN_LINE_SYSTEM       = 'emu-open-line-system'
     EMULATED_OPTICAL_ROADM          = 'emu-optical-roadm'
     EMULATED_OPTICAL_TRANSPONDER    = 'emu-optical-transponder'
+    EMULATED_OPTICAL_SPLITTER       = 'emu-optical-splitter'        # passive component required for XR Constellation
     EMULATED_P4_SWITCH              = 'emu-p4-switch'
+    EMULATED_PACKET_RADIO_ROUTER    = 'emu-packet-radio-router'
     EMULATED_PACKET_ROUTER          = 'emu-packet-router'
     EMULATED_PACKET_SWITCH          = 'emu-packet-switch'
+    EMULATED_XR_CONSTELLATION       = 'emu-xr-constellation'
 
     # Real device types
     DATACENTER                      = 'datacenter'
@@ -36,6 +39,10 @@ class DeviceTypeEnum(Enum):
     OPTICAL_ROADM                   = 'optical-roadm'
     OPTICAL_TRANSPONDER             = 'optical-transponder'
     P4_SWITCH                       = 'p4-switch'
+    PACKET_RADIO_ROUTER             = 'packet-radio-router'
     PACKET_ROUTER                   = 'packet-router'
     PACKET_SWITCH                   = 'packet-switch'
-    XR_CONSTELLATION                = 'xr-constellation'
\ No newline at end of file
+    XR_CONSTELLATION                = 'xr-constellation'
+
+    # ETSI TeraFlowSDN controller
+    TERAFLOWSDN_CONTROLLER          = 'teraflowsdn'
diff --git a/src/common/message_broker/backend/nats/NatsBackendThread.py b/src/common/message_broker/backend/nats/NatsBackendThread.py
index e59e4d6835ef662e4b0ed9f92d79a45c22954a6f..0bedd2b242f7eeaa1585d0eb41c5a0bd9efe07e5 100644
--- a/src/common/message_broker/backend/nats/NatsBackendThread.py
+++ b/src/common/message_broker/backend/nats/NatsBackendThread.py
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import asyncio, nats, nats.errors, queue, threading
+import asyncio, logging, nats, nats.errors, queue, threading
 from typing import List
 from common.message_broker.Message import Message
 
+LOGGER = logging.getLogger(__name__)
+
 class NatsBackendThread(threading.Thread):
     def __init__(self, nats_uri : str) -> None:
         self._nats_uri = nats_uri
@@ -32,7 +34,9 @@ class NatsBackendThread(threading.Thread):
         self._tasks_terminated.set()
 
     async def _run_publisher(self) -> None:
+        LOGGER.info('[_run_publisher] NATS URI: {:s}'.format(str(self._nats_uri)))
         client = await nats.connect(servers=[self._nats_uri])
+        LOGGER.info('[_run_publisher] Connected!')
         while not self._terminate.is_set():
             try:
                 message : Message = await self._publish_queue.get()
@@ -47,8 +51,11 @@ class NatsBackendThread(threading.Thread):
     async def _run_subscriber(
         self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event
     ) -> None:
+        LOGGER.info('[_run_subscriber] NATS URI: {:s}'.format(str(self._nats_uri)))
         client = await nats.connect(servers=[self._nats_uri])
+        LOGGER.info('[_run_subscriber] Connected!')
         subscription = await client.subscribe(topic_name)
+        LOGGER.info('[_run_subscriber] Subscribed!')
         while not self._terminate.is_set() and not unsubscribe.is_set():
             try:
                 message = await subscription.next_msg(timeout)
diff --git a/src/common/method_wrappers/Decorator.py b/src/common/method_wrappers/Decorator.py
index c5cbfb5659df7697de93a99da39a228d5df04001..b241d3b62821c0bfe319546cbeadce79fce59db9 100644
--- a/src/common/method_wrappers/Decorator.py
+++ b/src/common/method_wrappers/Decorator.py
@@ -15,7 +15,7 @@
 import grpc, json, logging, threading
 from enum import Enum
 from prettytable import PrettyTable
-from typing import Any, Dict, List, Set, Tuple
+from typing import Any, Dict, List, Optional, Set, Tuple
 from prometheus_client import Counter, Histogram
 from prometheus_client.metrics import MetricWrapperBase, INF
 from common.tools.grpc.Tools import grpc_message_to_json_string
@@ -25,20 +25,28 @@ class MetricTypeEnum(Enum):
     COUNTER_STARTED    = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_started'
     COUNTER_COMPLETED  = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_completed'
     COUNTER_FAILED     = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_failed'
+    COUNTER_BLOCKED    = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_blocked'
     HISTOGRAM_DURATION = 'tfs_{component:s}_{sub_module:s}_{method:s}_histogram_duration'
 
 METRIC_TO_CLASS_PARAMS = {
     MetricTypeEnum.COUNTER_STARTED   : (Counter,   {}),
     MetricTypeEnum.COUNTER_COMPLETED : (Counter,   {}),
     MetricTypeEnum.COUNTER_FAILED    : (Counter,   {}),
+    MetricTypeEnum.COUNTER_BLOCKED   : (Counter,   {}),
     MetricTypeEnum.HISTOGRAM_DURATION: (Histogram, {
         'buckets': (
             # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
-            0.0010, 0.0025, 0.0050, 0.0075,
-            0.0100, 0.0250, 0.0500, 0.0750,
-            0.1000, 0.2500, 0.5000, 0.7500,
-            1.0000, 2.5000, 5.0000, 7.5000,
-            INF)
+            #0.0010, 0.0025, 0.0050, 0.0075,
+            #0.0100, 0.0250, 0.0500, 0.0750,
+            #0.1000, 0.2500, 0.5000, 0.7500,
+            #1.0000, 2.5000, 5.0000, 7.5000,
+            0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009,  # 1~9 ms
+            0.010, 0.020, 0.030, 0.040, 0.050, 0.060, 0.070, 0.080, 0.090,  # 10~90 ms
+            0.100, 0.200, 0.300, 0.400, 0.500, 0.600, 0.700, 0.800, 0.900,  # 100~900 ms
+            1.000, 2.000, 3.000, 4.000, 5.000, 6.000, 7.000, 8.000, 9.000,  # 1~9 sec
+            10.00, 20.00, 30.00, 40.00, 50.00, 60.00, 70.00, 80.00, 90.00,  # 10~90 sec
+            100.0, 110.0, 120.0, INF                                        # 100sec~2min & Infinity
+        )
     })
 }
 
@@ -69,21 +77,45 @@ class MetricsPool:
             return MetricsPool.metrics[metric_name]
 
     def get_metrics(
-        self, method : str
-    ) -> Tuple[MetricWrapperBase, MetricWrapperBase, MetricWrapperBase, MetricWrapperBase]:
+        self, method : str, labels : Optional[Dict[str, str]] = None
+    ) -> Tuple[Histogram, Counter, Counter, Counter]:
         histogram_duration : Histogram = self.get_or_create(method, MetricTypeEnum.HISTOGRAM_DURATION)
         counter_started    : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_STARTED)
         counter_completed  : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_COMPLETED)
         counter_failed     : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_FAILED)
 
-        if len(self._labels) > 0:
-            histogram_duration = histogram_duration.labels(**(self._labels))
-            counter_started    = counter_started.labels(**(self._labels))
-            counter_completed  = counter_completed.labels(**(self._labels))
-            counter_failed     = counter_failed.labels(**(self._labels))
+        if labels is None and len(self._labels) > 0:
+            labels = self._labels
+
+        if labels is not None and len(labels) > 0:
+            histogram_duration = histogram_duration.labels(**labels)
+            counter_started    = counter_started.labels(**labels)
+            counter_completed  = counter_completed.labels(**labels)
+            counter_failed     = counter_failed.labels(**labels)
 
         return histogram_duration, counter_started, counter_completed, counter_failed
 
+    def get_metrics_loadgen(
+        self, method : str, labels : Optional[Dict[str, str]] = None
+    ) -> Tuple[Histogram, Counter, Counter, Counter, Counter]:
+        histogram_duration : Histogram = self.get_or_create(method, MetricTypeEnum.HISTOGRAM_DURATION)
+        counter_started    : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_STARTED)
+        counter_completed  : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_COMPLETED)
+        counter_failed     : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_FAILED)
+        counter_blocked    : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_BLOCKED)
+
+        if labels is None and len(self._labels) > 0:
+            labels = self._labels
+
+        if labels is not None and len(labels) > 0:
+            histogram_duration = histogram_duration.labels(**labels)
+            counter_started    = counter_started.labels(**labels)
+            counter_completed  = counter_completed.labels(**labels)
+            counter_failed     = counter_failed.labels(**labels)
+            counter_blocked    = counter_blocked.labels(**labels)
+
+        return histogram_duration, counter_started, counter_completed, counter_failed, counter_blocked
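+
+    # Illustrative usage (names below are assumed, not part of this module):
+    #   histo, started, completed, failed, blocked = \
+    #       metrics_pool.get_metrics_loadgen('Setup')
+    #   started.inc()
+    #   with histo.time():              # prometheus_client Histogram timer
+    #       accepted = do_request()     # hypothetical request helper
+    #   (completed if accepted else blocked).inc()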
+
     def get_pretty_table(self, remove_empty_buckets : bool = True) -> PrettyTable:
         with MetricsPool.lock:
             method_to_metric_fields : Dict[str, Dict[str, Dict[str, Any]]] = dict()
@@ -194,6 +226,8 @@ def safe_and_metered_rpc_method(metrics_pool : MetricsPool, logger : logging.Log
                     # Assume not found or already exists is just a condition, not an error
                     logger.exception('{:s} exception'.format(method_name))
                     counter_failed.inc()
+                else:
+                    counter_completed.inc()
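+                    # benign gRPC conditions (e.g., NOT_FOUND, ALREADY_EXISTS) now
+                    # count as completed rather than failed before the abort below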
                 grpc_context.abort(e.code, e.details)
             except Exception as e:          # pragma: no cover, pylint: disable=broad-except
                 logger.exception('{:s} exception'.format(method_name))
diff --git a/src/common/method_wrappers/tests/grafana_prometheus_component_rpc.json b/src/common/method_wrappers/tests/grafana_prometheus_component_rpc.json
deleted file mode 100644
index b5b857e7573264f26289ba9a72ec5444e4ac71a4..0000000000000000000000000000000000000000
--- a/src/common/method_wrappers/tests/grafana_prometheus_component_rpc.json
+++ /dev/null
@@ -1,426 +0,0 @@
-{
-  "annotations": {
-    "list": [
-      {
-        "builtIn": 1,
-        "datasource": "-- Grafana --",
-        "enable": true,
-        "hide": true,
-        "iconColor": "rgba(0, 211, 255, 1)",
-        "name": "Annotations & Alerts",
-        "type": "dashboard"
-      }
-    ]
-  },
-  "editable": true,
-  "gnetId": null,
-  "graphTooltip": 0,
-  "id": 25,
-  "iteration": 1671297223428,
-  "links": [],
-  "panels": [
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "prometheus",
-      "fieldConfig": {
-        "defaults": {},
-        "overrides": []
-      },
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 24,
-        "x": 0,
-        "y": 0
-      },
-      "hiddenSeries": false,
-      "id": 4,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "alertThreshold": true
-      },
-      "percentage": false,
-      "pluginVersion": "7.5.4",
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_[[component]]_rpc_[[method]]_counter_requests_started_total{pod=~\"[[pod]]\"})",
-          "interval": "",
-          "legendFormat": "started",
-          "queryType": "randomWalk",
-          "refId": "A"
-        },
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_[[component]]_rpc_[[method]]_counter_requests_completed_total{pod=~\"[[pod]]\"})",
-          "hide": false,
-          "interval": "",
-          "legendFormat": "completed",
-          "refId": "B"
-        },
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_[[component]]_rpc_[[method]]_counter_requests_started_total{pod=~\"[[pod]]\"})",
-          "hide": false,
-          "interval": "",
-          "legendFormat": "failed",
-          "refId": "C"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Requests",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transformations": [],
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "$$hashKey": "object:935",
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": "0",
-          "show": true
-        },
-        {
-          "$$hashKey": "object:936",
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "cards": {
-        "cardPadding": null,
-        "cardRound": null
-      },
-      "color": {
-        "cardColor": "#b4ff00",
-        "colorScale": "linear",
-        "colorScheme": "interpolateRdYlGn",
-        "exponent": 0.5,
-        "max": null,
-        "min": 0,
-        "mode": "opacity"
-      },
-      "dataFormat": "tsbuckets",
-      "datasource": "prometheus",
-      "fieldConfig": {
-        "defaults": {},
-        "overrides": []
-      },
-      "gridPos": {
-        "h": 8,
-        "w": 24,
-        "x": 0,
-        "y": 6
-      },
-      "heatmap": {},
-      "hideZeroBuckets": true,
-      "highlightCards": true,
-      "id": 2,
-      "interval": "60s",
-      "legend": {
-        "show": true
-      },
-      "pluginVersion": "7.5.4",
-      "reverseYBuckets": false,
-      "targets": [
-        {
-          "exemplar": true,
-          "expr": "sum(\r\n    max_over_time(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket{pod=~\"[[pod]]\"}[1m]) -\r\n    min_over_time(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket{pod=~\"[[pod]]\"}[1m])\r\n) by (le)",
-          "format": "heatmap",
-          "instant": false,
-          "interval": "1m",
-          "intervalFactor": 1,
-          "legendFormat": "{{le}}",
-          "queryType": "randomWalk",
-          "refId": "A"
-        }
-      ],
-      "title": "Histogram",
-      "tooltip": {
-        "show": true,
-        "showHistogram": true
-      },
-      "type": "heatmap",
-      "xAxis": {
-        "show": true
-      },
-      "xBucketNumber": null,
-      "xBucketSize": null,
-      "yAxis": {
-        "decimals": null,
-        "format": "s",
-        "logBase": 1,
-        "max": null,
-        "min": null,
-        "show": true,
-        "splitFactor": null
-      },
-      "yBucketBound": "auto",
-      "yBucketNumber": null,
-      "yBucketSize": null
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "prometheus",
-      "fieldConfig": {
-        "defaults": {},
-        "overrides": []
-      },
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 24,
-        "x": 0,
-        "y": 14
-      },
-      "hiddenSeries": false,
-      "id": 5,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "alertThreshold": true
-      },
-      "percentage": false,
-      "pluginVersion": "7.5.4",
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_[[component]]_rpc_[[method]]_histogram_duration_sum{pod=~\"[[pod]]\"})",
-          "hide": false,
-          "interval": "",
-          "legendFormat": "total time",
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Total Exec Time",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transformations": [],
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "$$hashKey": "object:407",
-          "format": "s",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": "0",
-          "show": true
-        },
-        {
-          "$$hashKey": "object:408",
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    }
-  ],
-  "refresh": "5s",
-  "schemaVersion": 27,
-  "style": "dark",
-  "tags": [],
-  "templating": {
-    "list": [
-      {
-        "allValue": null,
-        "current": {
-          "selected": false,
-          "text": "context",
-          "value": "context"
-        },
-        "datasource": "prometheus",
-        "definition": "metrics(tfs_)",
-        "description": null,
-        "error": null,
-        "hide": 0,
-        "includeAll": false,
-        "label": "Component",
-        "multi": false,
-        "name": "component",
-        "options": [],
-        "query": {
-          "query": "metrics(tfs_)",
-          "refId": "StandardVariableQuery"
-        },
-        "refresh": 2,
-        "regex": "/tfs_(.+)_rpc_.*/",
-        "skipUrlSync": false,
-        "sort": 0,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      },
-      {
-        "allValue": "",
-        "current": {
-          "selected": false,
-          "text": "getcontext",
-          "value": "getcontext"
-        },
-        "datasource": "prometheus",
-        "definition": "metrics(tfs_[[component]]_rpc_)",
-        "description": null,
-        "error": null,
-        "hide": 0,
-        "includeAll": false,
-        "label": "Method",
-        "multi": false,
-        "name": "method",
-        "options": [],
-        "query": {
-          "query": "metrics(tfs_[[component]]_rpc_)",
-          "refId": "StandardVariableQuery"
-        },
-        "refresh": 2,
-        "regex": "/tfs_[[component]]_rpc_(.+)_histogram_duration_bucket/",
-        "skipUrlSync": false,
-        "sort": 0,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      },
-      {
-        "allValue": ".*",
-        "current": {
-          "selected": true,
-          "text": [
-            "All"
-          ],
-          "value": [
-            "$__all"
-          ]
-        },
-        "datasource": "prometheus",
-        "definition": "label_values(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket, pod)",
-        "description": null,
-        "error": null,
-        "hide": 0,
-        "includeAll": true,
-        "label": "Pod",
-        "multi": true,
-        "name": "pod",
-        "options": [],
-        "query": {
-          "query": "label_values(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket, pod)",
-          "refId": "StandardVariableQuery"
-        },
-        "refresh": 2,
-        "regex": "",
-        "skipUrlSync": false,
-        "sort": 0,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      }
-    ]
-  },
-  "time": {
-    "from": "now-15m",
-    "to": "now"
-  },
-  "timepicker": {},
-  "timezone": "",
-  "title": "TFS / Component RPCs",
-  "uid": "KKxzxIFVz",
-  "version": 21
-}
\ No newline at end of file
diff --git a/src/common/method_wrappers/tests/grafana_prometheus_device_driver.json b/src/common/method_wrappers/tests/grafana_prometheus_device_driver.json
deleted file mode 100644
index 2926a409b3b77b16c4e7b5d86ecd7d56f6acdebc..0000000000000000000000000000000000000000
--- a/src/common/method_wrappers/tests/grafana_prometheus_device_driver.json
+++ /dev/null
@@ -1,431 +0,0 @@
-{
-  "annotations": {
-    "list": [
-      {
-        "builtIn": 1,
-        "datasource": "-- Grafana --",
-        "enable": true,
-        "hide": true,
-        "iconColor": "rgba(0, 211, 255, 1)",
-        "name": "Annotations & Alerts",
-        "type": "dashboard"
-      }
-    ]
-  },
-  "editable": true,
-  "gnetId": null,
-  "graphTooltip": 0,
-  "id": 26,
-  "iteration": 1671318718779,
-  "links": [],
-  "panels": [
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "prometheus",
-      "fieldConfig": {
-        "defaults": {},
-        "overrides": []
-      },
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 24,
-        "x": 0,
-        "y": 0
-      },
-      "hiddenSeries": false,
-      "id": 4,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "alertThreshold": true
-      },
-      "percentage": false,
-      "pluginVersion": "7.5.4",
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_device_driver_[[method]]_counter_requests_started_total{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})",
-          "interval": "",
-          "legendFormat": "started",
-          "queryType": "randomWalk",
-          "refId": "A"
-        },
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_device_driver_[[method]]_counter_requests_completed_total{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})",
-          "hide": false,
-          "interval": "",
-          "legendFormat": "completed",
-          "refId": "B"
-        },
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_device_driver_[[method]]_counter_requests_failed_total{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})",
-          "hide": false,
-          "interval": "",
-          "legendFormat": "failed",
-          "refId": "C"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Requests",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transformations": [],
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "$$hashKey": "object:864",
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": "0",
-          "show": true
-        },
-        {
-          "$$hashKey": "object:865",
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "cards": {
-        "cardPadding": null,
-        "cardRound": null
-      },
-      "color": {
-        "cardColor": "#b4ff00",
-        "colorScale": "linear",
-        "colorScheme": "interpolateRdYlGn",
-        "exponent": 0.5,
-        "max": null,
-        "min": 0,
-        "mode": "opacity"
-      },
-      "dataFormat": "tsbuckets",
-      "datasource": "prometheus",
-      "fieldConfig": {
-        "defaults": {},
-        "overrides": []
-      },
-      "gridPos": {
-        "h": 8,
-        "w": 24,
-        "x": 0,
-        "y": 6
-      },
-      "heatmap": {},
-      "hideZeroBuckets": true,
-      "highlightCards": true,
-      "id": 2,
-      "interval": "60s",
-      "legend": {
-        "show": true
-      },
-      "pluginVersion": "7.5.4",
-      "reverseYBuckets": false,
-      "targets": [
-        {
-          "exemplar": true,
-          "expr": "sum(\r\n    max_over_time(tfs_device_driver_[[method]]_histogram_duration_bucket{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"}[1m]) -\r\n    min_over_time(tfs_device_driver_[[method]]_histogram_duration_bucket{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"}[1m])\r\n) by (le)",
-          "format": "heatmap",
-          "instant": false,
-          "interval": "60s",
-          "intervalFactor": 1,
-          "legendFormat": "{{le}}",
-          "queryType": "randomWalk",
-          "refId": "A"
-        }
-      ],
-      "timeFrom": null,
-      "title": "Histogram",
-      "tooltip": {
-        "show": true,
-        "showHistogram": true
-      },
-      "type": "heatmap",
-      "xAxis": {
-        "show": true
-      },
-      "xBucketNumber": null,
-      "xBucketSize": null,
-      "yAxis": {
-        "decimals": null,
-        "format": "s",
-        "logBase": 1,
-        "max": null,
-        "min": null,
-        "show": true,
-        "splitFactor": null
-      },
-      "yBucketBound": "auto",
-      "yBucketNumber": null,
-      "yBucketSize": null
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "prometheus",
-      "fieldConfig": {
-        "defaults": {},
-        "overrides": []
-      },
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 24,
-        "x": 0,
-        "y": 14
-      },
-      "hiddenSeries": false,
-      "id": 5,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "alertThreshold": true
-      },
-      "percentage": false,
-      "pluginVersion": "7.5.4",
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_device_driver_[[method]]_histogram_duration_sum{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})",
-          "hide": false,
-          "interval": "",
-          "legendFormat": "total time",
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Total Exec Time",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transformations": [],
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "$$hashKey": "object:407",
-          "format": "s",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": "0",
-          "show": true
-        },
-        {
-          "$$hashKey": "object:408",
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    }
-  ],
-  "refresh": "5s",
-  "schemaVersion": 27,
-  "style": "dark",
-  "tags": [],
-  "templating": {
-    "list": [
-      {
-        "allValue": "",
-        "current": {
-          "selected": false,
-          "text": "setconfig",
-          "value": "setconfig"
-        },
-        "datasource": "prometheus",
-        "definition": "metrics(tfs_device_driver_.+)",
-        "description": null,
-        "error": null,
-        "hide": 0,
-        "includeAll": false,
-        "label": "Method",
-        "multi": false,
-        "name": "method",
-        "options": [],
-        "query": {
-          "query": "metrics(tfs_device_driver_.+)",
-          "refId": "StandardVariableQuery"
-        },
-        "refresh": 2,
-        "regex": "/tfs_device_driver_(.+config)_histogram_duration_bucket/",
-        "skipUrlSync": false,
-        "sort": 0,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      },
-      {
-        "allValue": ".*",
-        "current": {
-          "selected": true,
-          "text": [
-            "All"
-          ],
-          "value": [
-            "$__all"
-          ]
-        },
-        "datasource": "prometheus",
-        "definition": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, driver)",
-        "description": null,
-        "error": null,
-        "hide": 0,
-        "includeAll": true,
-        "label": "Driver",
-        "multi": true,
-        "name": "driver",
-        "options": [],
-        "query": {
-          "query": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, driver)",
-          "refId": "StandardVariableQuery"
-        },
-        "refresh": 2,
-        "regex": "",
-        "skipUrlSync": false,
-        "sort": 0,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      },
-      {
-        "allValue": ".*",
-        "current": {
-          "selected": true,
-          "text": [
-            "All"
-          ],
-          "value": [
-            "$__all"
-          ]
-        },
-        "datasource": "prometheus",
-        "definition": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, pod)",
-        "description": null,
-        "error": null,
-        "hide": 0,
-        "includeAll": true,
-        "label": "Pod",
-        "multi": true,
-        "name": "pod",
-        "options": [],
-        "query": {
-          "query": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, pod)",
-          "refId": "StandardVariableQuery"
-        },
-        "refresh": 2,
-        "regex": "/deviceservice-(.*)/",
-        "skipUrlSync": false,
-        "sort": 0,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      }
-    ]
-  },
-  "time": {
-    "from": "now-15m",
-    "to": "now"
-  },
-  "timepicker": {},
-  "timezone": "",
-  "title": "TFS / Device / Driver",
-  "uid": "eAg-wsOVk",
-  "version": 30
-}
\ No newline at end of file
diff --git a/src/common/method_wrappers/tests/grafana_prometheus_service_handler.json b/src/common/method_wrappers/tests/grafana_prometheus_service_handler.json
deleted file mode 100644
index 48e770afe4bba9c2eb5df76d3532bf35d6cfe192..0000000000000000000000000000000000000000
--- a/src/common/method_wrappers/tests/grafana_prometheus_service_handler.json
+++ /dev/null
@@ -1,432 +0,0 @@
-{
-  "annotations": {
-    "list": [
-      {
-        "builtIn": 1,
-        "datasource": "-- Grafana --",
-        "enable": true,
-        "hide": true,
-        "iconColor": "rgba(0, 211, 255, 1)",
-        "name": "Annotations & Alerts",
-        "type": "dashboard"
-      }
-    ]
-  },
-  "editable": true,
-  "gnetId": null,
-  "graphTooltip": 0,
-  "id": 27,
-  "iteration": 1671319012315,
-  "links": [],
-  "panels": [
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "prometheus",
-      "fieldConfig": {
-        "defaults": {},
-        "overrides": []
-      },
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 24,
-        "x": 0,
-        "y": 0
-      },
-      "hiddenSeries": false,
-      "id": 4,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "alertThreshold": true
-      },
-      "percentage": false,
-      "pluginVersion": "7.5.4",
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_service_handler_[[method]]_counter_requests_started_total{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})",
-          "instant": false,
-          "interval": "",
-          "legendFormat": "started",
-          "queryType": "randomWalk",
-          "refId": "A"
-        },
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_service_handler_[[method]]_counter_requests_completed_total{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})",
-          "hide": false,
-          "interval": "",
-          "legendFormat": "completed",
-          "refId": "B"
-        },
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_service_handler_[[method]]_counter_requests_failed_total{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})",
-          "hide": false,
-          "interval": "",
-          "legendFormat": "failed",
-          "refId": "C"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Requests",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transformations": [],
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "$$hashKey": "object:935",
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": "0",
-          "show": true
-        },
-        {
-          "$$hashKey": "object:936",
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "cards": {
-        "cardPadding": null,
-        "cardRound": null
-      },
-      "color": {
-        "cardColor": "#b4ff00",
-        "colorScale": "linear",
-        "colorScheme": "interpolateRdYlGn",
-        "exponent": 0.5,
-        "max": null,
-        "min": 0,
-        "mode": "opacity"
-      },
-      "dataFormat": "tsbuckets",
-      "datasource": "prometheus",
-      "fieldConfig": {
-        "defaults": {},
-        "overrides": []
-      },
-      "gridPos": {
-        "h": 8,
-        "w": 24,
-        "x": 0,
-        "y": 6
-      },
-      "heatmap": {},
-      "hideZeroBuckets": true,
-      "highlightCards": true,
-      "id": 2,
-      "interval": "60s",
-      "legend": {
-        "show": true
-      },
-      "pluginVersion": "7.5.4",
-      "reverseYBuckets": false,
-      "targets": [
-        {
-          "exemplar": true,
-          "expr": "sum(\r\n    max_over_time(tfs_service_handler_[[method]]_histogram_duration_bucket{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"}[1m]) -\r\n    min_over_time(tfs_service_handler_[[method]]_histogram_duration_bucket{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"}[1m])\r\n) by (le)",
-          "format": "heatmap",
-          "instant": false,
-          "interval": "1m",
-          "intervalFactor": 1,
-          "legendFormat": "{{le}}",
-          "queryType": "randomWalk",
-          "refId": "A"
-        }
-      ],
-      "timeFrom": null,
-      "title": "Histogram",
-      "tooltip": {
-        "show": true,
-        "showHistogram": true
-      },
-      "type": "heatmap",
-      "xAxis": {
-        "show": true
-      },
-      "xBucketNumber": null,
-      "xBucketSize": null,
-      "yAxis": {
-        "decimals": null,
-        "format": "s",
-        "logBase": 1,
-        "max": null,
-        "min": null,
-        "show": true,
-        "splitFactor": null
-      },
-      "yBucketBound": "auto",
-      "yBucketNumber": null,
-      "yBucketSize": null
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "prometheus",
-      "fieldConfig": {
-        "defaults": {},
-        "overrides": []
-      },
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 24,
-        "x": 0,
-        "y": 14
-      },
-      "hiddenSeries": false,
-      "id": 5,
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "alertThreshold": true
-      },
-      "percentage": false,
-      "pluginVersion": "7.5.4",
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "exemplar": true,
-          "expr": "sum(tfs_service_handler_[[method]]_histogram_duration_sum{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})",
-          "hide": false,
-          "interval": "",
-          "legendFormat": "total time",
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Total Exec Time",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transformations": [],
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "$$hashKey": "object:407",
-          "format": "s",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": "0",
-          "show": true
-        },
-        {
-          "$$hashKey": "object:408",
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    }
-  ],
-  "refresh": "5s",
-  "schemaVersion": 27,
-  "style": "dark",
-  "tags": [],
-  "templating": {
-    "list": [
-      {
-        "allValue": "",
-        "current": {
-          "selected": false,
-          "text": "setendpoint",
-          "value": "setendpoint"
-        },
-        "datasource": "prometheus",
-        "definition": "metrics(tfs_service_handler_.+)",
-        "description": null,
-        "error": null,
-        "hide": 0,
-        "includeAll": false,
-        "label": "Method",
-        "multi": false,
-        "name": "method",
-        "options": [],
-        "query": {
-          "query": "metrics(tfs_service_handler_.+)",
-          "refId": "StandardVariableQuery"
-        },
-        "refresh": 2,
-        "regex": "/tfs_service_handler_(.+)_histogram_duration_bucket/",
-        "skipUrlSync": false,
-        "sort": 0,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      },
-      {
-        "allValue": ".*",
-        "current": {
-          "selected": true,
-          "text": [
-            "All"
-          ],
-          "value": [
-            "$__all"
-          ]
-        },
-        "datasource": "prometheus",
-        "definition": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, handler)",
-        "description": null,
-        "error": null,
-        "hide": 0,
-        "includeAll": true,
-        "label": "Handler",
-        "multi": true,
-        "name": "handler",
-        "options": [],
-        "query": {
-          "query": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, handler)",
-          "refId": "StandardVariableQuery"
-        },
-        "refresh": 2,
-        "regex": "",
-        "skipUrlSync": false,
-        "sort": 0,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      },
-      {
-        "allValue": ".*",
-        "current": {
-          "selected": true,
-          "text": [
-            "All"
-          ],
-          "value": [
-            "$__all"
-          ]
-        },
-        "datasource": "prometheus",
-        "definition": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, pod)",
-        "description": null,
-        "error": null,
-        "hide": 0,
-        "includeAll": true,
-        "label": "Pod",
-        "multi": true,
-        "name": "pod",
-        "options": [],
-        "query": {
-          "query": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, pod)",
-          "refId": "StandardVariableQuery"
-        },
-        "refresh": 2,
-        "regex": "/serviceservice-(.*)/",
-        "skipUrlSync": false,
-        "sort": 0,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      }
-    ]
-  },
-  "time": {
-    "from": "now-15m",
-    "to": "now"
-  },
-  "timepicker": {},
-  "timezone": "",
-  "title": "TFS / Service / Handler",
-  "uid": "DNOhOIF4k",
-  "version": 16
-}
\ No newline at end of file
diff --git a/src/common/tools/context_queries/Connection.py b/src/common/tools/context_queries/Connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..3021335131332dab73d6d645f4c7937f499732ef
--- /dev/null
+++ b/src/common/tools/context_queries/Connection.py
@@ -0,0 +1,43 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from typing import Optional
+from common.proto.context_pb2 import Connection, ConnectionId
+from context.client.ContextClient import ContextClient
+
+LOGGER = logging.getLogger(__name__)
+
+def get_connection_by_id(
+    context_client : ContextClient, connection_id : ConnectionId, rw_copy : bool = False
+) -> Optional[Connection]:
+    try:
+        ro_connection : Connection = context_client.GetConnection(connection_id)
+        if not rw_copy: return ro_connection
+        rw_connection = Connection()
+        rw_connection.CopyFrom(ro_connection)
+        return rw_connection
+    except grpc.RpcError as e:
+        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
+        #connection_uuid = connection_id.connection_uuid.uuid
+        #LOGGER.exception('Unable to get connection({:s})'.format(str(connection_uuid)))
+        return None
+
+def get_connection_by_uuid(
+    context_client : ContextClient, connection_uuid : str, rw_copy : bool = False
+) -> Optional[Connection]:
+    # pylint: disable=no-member
+    connection_id = ConnectionId()
+    connection_id.connection_uuid.uuid = connection_uuid
+    return get_connection_by_id(context_client, connection_id, rw_copy=rw_copy)
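
A minimal usage sketch of the two helpers above (the connection UUID is illustrative, and ContextClient is assumed reachable with its default configuration):

    from common.tools.context_queries.Connection import get_connection_by_uuid
    from context.client.ContextClient import ContextClient

    context_client = ContextClient()
    # Read-only lookup; returns None when the connection does not exist.
    connection = get_connection_by_uuid(context_client, 'conn-uuid-1')
    # rw_copy=True returns a detached copy that is safe to mutate locally.
    rw_connection = get_connection_by_uuid(context_client, 'conn-uuid-1', rw_copy=True)
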
diff --git a/src/common/tools/context_queries/Device.py b/src/common/tools/context_queries/Device.py
index 166882f2fd90475d6bc64b4c4a7c44535fc6aa64..95f0e90b740d3b167f09db394e259599db20a59b 100644
--- a/src/common/tools/context_queries/Device.py
+++ b/src/common/tools/context_queries/Device.py
@@ -14,23 +14,34 @@
 
 import grpc, logging
 from typing import List, Optional, Set
-from common.proto.context_pb2 import ContextId, Device, DeviceId, Empty, Topology, TopologyId
+from common.proto.context_pb2 import ContextId, Device, DeviceFilter, Empty, Topology, TopologyId
 from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 
 LOGGER = logging.getLogger(__name__)
 
-def get_device(context_client : ContextClient, device_uuid : str, rw_copy : bool = False) -> Optional[Device]:
+def get_device(
+    context_client : ContextClient, device_uuid : str, rw_copy : bool = False,
+    include_endpoints : bool = True, include_config_rules : bool = True, include_components : bool = True
+) -> Optional[Device]:
+    device_filter = DeviceFilter()
+    device_id = device_filter.device_ids.device_ids.add() # pylint: disable=no-member
+    device_id.device_uuid.uuid = device_uuid
+    device_filter.include_endpoints = include_endpoints
+    device_filter.include_config_rules = include_config_rules
+    device_filter.include_components = include_components
+
     try:
-        # pylint: disable=no-member
-        device_id = DeviceId()
-        device_id.device_uuid.uuid = device_uuid
-        ro_device = context_client.GetDevice(device_id)
+        ro_devices = context_client.SelectDevice(device_filter)
+        if len(ro_devices.devices) == 0: return None
+        assert len(ro_devices.devices) == 1
+        ro_device = ro_devices.devices[0]
         if not rw_copy: return ro_device
         rw_device = Device()
         rw_device.CopyFrom(ro_device)
         return rw_device
-    except grpc.RpcError:
+    except grpc.RpcError as e:
+        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
         #LOGGER.exception('Unable to get Device({:s})'.format(str(device_uuid)))
         return None
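
A hedged sketch of the new include_* knobs (device UUID illustrative): skipping parts of the device trims the payload returned by SelectDevice.

    from common.tools.context_queries.Device import get_device
    from context.client.ContextClient import ContextClient

    context_client = ContextClient()
    # Retrieve endpoints and config rules, but skip the (potentially large) components.
    device = get_device(context_client, 'R1', include_components=False)
    if device is None:
        print('device R1 not found')
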
 
diff --git a/src/common/tools/context_queries/Service.py b/src/common/tools/context_queries/Service.py
index 25716152c3f37fec93df340073bae8871e16c3a7..b3b74827a5c86838cb4330ae89c1297652dc59b0 100644
--- a/src/common/tools/context_queries/Service.py
+++ b/src/common/tools/context_queries/Service.py
@@ -15,25 +15,43 @@
 import grpc, logging
 from typing import Optional
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import Service, ServiceId
+from common.proto.context_pb2 import Service, ServiceFilter, ServiceId
 from context.client.ContextClient import ContextClient
 
 LOGGER = logging.getLogger(__name__)
 
-def get_service(
-        context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME,
-        rw_copy : bool = False
-    ) -> Optional[Service]:
+def get_service_by_id(
+    context_client : ContextClient, service_id : ServiceId, rw_copy : bool = False,
+    include_endpoint_ids : bool = True, include_constraints : bool = True, include_config_rules : bool = True
+) -> Optional[Service]:
+    service_filter = ServiceFilter()
+    service_filter.service_ids.service_ids.append(service_id) # pylint: disable=no-member
+    service_filter.include_endpoint_ids = include_endpoint_ids
+    service_filter.include_constraints = include_constraints
+    service_filter.include_config_rules = include_config_rules
+
     try:
-        # pylint: disable=no-member
-        service_id = ServiceId()
-        service_id.context_id.context_uuid.uuid = context_uuid
-        service_id.service_uuid.uuid = service_uuid
-        ro_service = context_client.GetService(service_id)
+        ro_services = context_client.SelectService(service_filter)
+        if len(ro_services.services) == 0: return None
+        assert len(ro_services.services) == 1
+        ro_service = ro_services.services[0]
         if not rw_copy: return ro_service
         rw_service = Service()
         rw_service.CopyFrom(ro_service)
         return rw_service
-    except grpc.RpcError:
+    except grpc.RpcError as e:
+        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
         #LOGGER.exception('Unable to get service({:s} / {:s})'.format(str(context_uuid), str(service_uuid)))
         return None
+
+def get_service_by_uuid(
+    context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME,
+    rw_copy : bool = False, include_endpoint_ids : bool = True, include_constraints : bool = True,
+    include_config_rules : bool = True
+) -> Optional[Service]:
+    service_id = ServiceId()
+    service_id.context_id.context_uuid.uuid = context_uuid  # pylint: disable=no-member
+    service_id.service_uuid.uuid = service_uuid             # pylint: disable=no-member
+    return get_service_by_id(
+        context_client, service_id, rw_copy=rw_copy, include_endpoint_ids=include_endpoint_ids,
+        include_constraints=include_constraints, include_config_rules=include_config_rules)
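
For illustration, fetching only a service skeleton with the new by-uuid helper (service UUID illustrative):

    from common.tools.context_queries.Service import get_service_by_uuid
    from context.client.ContextClient import ContextClient

    context_client = ContextClient()
    # Fetch endpoint IDs only; skip constraints and config rules.
    service = get_service_by_uuid(
        context_client, 'my-service', include_constraints=False, include_config_rules=False)
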
diff --git a/src/common/tools/context_queries/Slice.py b/src/common/tools/context_queries/Slice.py
index e5fb86d7a5aa8c08bf323f641737efcf8eec14ef..c3ce572fce8b3fb209b46b561a4004979dce4913 100644
--- a/src/common/tools/context_queries/Slice.py
+++ b/src/common/tools/context_queries/Slice.py
@@ -15,25 +15,47 @@
 import grpc, logging
 from typing import Optional
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import Slice, SliceId
+from common.proto.context_pb2 import Slice, SliceFilter, SliceId
 from context.client.ContextClient import ContextClient
 
 LOGGER = logging.getLogger(__name__)
 
-def get_slice(
-        context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME,
-        rw_copy : bool = False
-    ) -> Optional[Slice]:
+def get_slice_by_id(
+    context_client : ContextClient, slice_id : SliceId, rw_copy : bool = False, include_endpoint_ids : bool = True,
+    include_constraints : bool = True, include_service_ids : bool = True, include_subslice_ids : bool = True,
+    include_config_rules : bool = True
+) -> Optional[Slice]:
+    slice_filter = SliceFilter()
+    slice_filter.slice_ids.slice_ids.append(slice_id) # pylint: disable=no-member
+    slice_filter.include_endpoint_ids = include_endpoint_ids
+    slice_filter.include_constraints = include_constraints
+    slice_filter.include_service_ids = include_service_ids
+    slice_filter.include_subslice_ids = include_subslice_ids
+    slice_filter.include_config_rules = include_config_rules
+
     try:
-        # pylint: disable=no-member
-        slice_id = SliceId()
-        slice_id.context_id.context_uuid.uuid = context_uuid
-        slice_id.slice_uuid.uuid = slice_uuid
-        ro_slice = context_client.GetSlice(slice_id)
+        ro_slices = context_client.SelectSlice(slice_filter)
+        if len(ro_slices.slices) == 0: return None
+        assert len(ro_slices.slices) == 1
+        ro_slice = ro_slices.slices[0]
         if not rw_copy: return ro_slice
         rw_slice = Slice()
         rw_slice.CopyFrom(ro_slice)
         return rw_slice
-    except grpc.RpcError:
+    except grpc.RpcError as e:
+        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
         #LOGGER.exception('Unable to get slice({:s} / {:s})'.format(str(context_uuid), str(slice_uuid)))
         return None
+
+def get_slice_by_uuid(
+    context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME,
+    rw_copy : bool = False, include_endpoint_ids : bool = True, include_constraints : bool = True,
+    include_service_ids : bool = True, include_subslice_ids : bool = True, include_config_rules : bool = True
+) -> Optional[Slice]:
+    slice_id = SliceId()
+    slice_id.context_id.context_uuid.uuid = context_uuid    # pylint: disable=no-member
+    slice_id.slice_uuid.uuid = slice_uuid                   # pylint: disable=no-member
+    return get_slice_by_id(
+        context_client, slice_id, rw_copy=rw_copy, include_endpoint_ids=include_endpoint_ids,
+        include_constraints=include_constraints, include_service_ids=include_service_ids,
+        include_subslice_ids=include_subslice_ids, include_config_rules=include_config_rules)
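
One subtlety in get_slice_by_id() above: protobuf's append() on a repeated message field copies its argument and returns None, which is why the filter is populated without reassigning the result. A minimal sketch (UUID values are illustrative):

    from common.proto.context_pb2 import SliceFilter, SliceId

    slice_id = SliceId()
    slice_id.context_id.context_uuid.uuid = 'admin'
    slice_id.slice_uuid.uuid = 'slice-1'

    slice_filter = SliceFilter()
    slice_filter.slice_ids.slice_ids.append(slice_id)  # copies slice_id; returns None
    assert len(slice_filter.slice_ids.slice_ids) == 1
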
diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py
index 0e1d8c7371e87b47bfc47a4242e00039add48e7f..1e238510c98b83bebde8167711b988d7476e5a99 100644
--- a/src/common/tools/descriptor/Loader.py
+++ b/src/common/tools/descriptor/Loader.py
@@ -222,13 +222,13 @@ class DescriptorLoader:
         self.__topologies_add = get_descriptors_add_topologies(self.__topologies)
 
         if self.__dummy_mode:
-            self._dummy_mode()
+            self._load_dummy_mode()
         else:
-            self._normal_mode()
+            self._load_normal_mode()
         
         return self.__results
 
-    def _dummy_mode(self) -> None:
+    def _load_dummy_mode(self) -> None:
         # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
         self.__ctx_cli.connect()
         self._process_descr('context',    'add',    self.__ctx_cli.SetContext,    Context,    self.__contexts_add  )
@@ -242,7 +242,7 @@ class DescriptorLoader:
         self._process_descr('topology',   'update', self.__ctx_cli.SetTopology,   Topology,   self.__topologies    )
         #self.__ctx_cli.close()
 
-    def _normal_mode(self) -> None:
+    def _load_normal_mode(self) -> None:
         # Normal mode: follows the automated workflows in the different components
         assert len(self.__connections) == 0, 'in normal mode, connections should not be set'
 
@@ -321,7 +321,35 @@ class DescriptorLoader:
             response = self.__ctx_cli.ListSlices(ContextId(**json_context_id(context_uuid)))
             assert len(response.slices) == num_slices
 
-    def unload(self) -> None:
+    def _unload_dummy_mode(self) -> None:
+        # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
+        self.__ctx_cli.connect()
+
+        for _, slice_list in self.slices.items():
+            for slice_ in slice_list:
+                self.__ctx_cli.RemoveSlice(SliceId(**slice_['slice_id']))
+
+        for _, service_list in self.services.items():
+            for service in service_list:
+                self.__ctx_cli.RemoveService(ServiceId(**service['service_id']))
+
+        for link in self.links:
+            self.__ctx_cli.RemoveLink(LinkId(**link['link_id']))
+
+        for device in self.devices:
+            self.__ctx_cli.RemoveDevice(DeviceId(**device['device_id']))
+
+        for _, topology_list in self.topologies.items():
+            for topology in topology_list:
+                self.__ctx_cli.RemoveTopology(TopologyId(**topology['topology_id']))
+
+        for context in self.contexts:
+            self.__ctx_cli.RemoveContext(ContextId(**context['context_id']))
+
+        #self.__ctx_cli.close()
+
+    def _unload_normal_mode(self) -> None:
+        # Normal mode: follows the automated workflows in the different components
         self.__ctx_cli.connect()
         self.__dev_cli.connect()
         self.__svc_cli.connect()
@@ -348,6 +376,17 @@ class DescriptorLoader:
         for context in self.contexts:
             self.__ctx_cli.RemoveContext(ContextId(**context['context_id']))
 
+        #self.__ctx_cli.close()
+        #self.__dev_cli.close()
+        #self.__svc_cli.close()
+        #self.__slc_cli.close()
+
+    def unload(self) -> None:
+        if self.__dummy_mode:
+            self._unload_dummy_mode()
+        else:
+            self._unload_normal_mode()
+
 def compose_notifications(results : TypeResults) -> TypeNotificationList:
     notifications = []
     for entity_name, action_name, num_ok, error_list in results:
diff --git a/src/common/tools/object_factory/Device.py b/src/common/tools/object_factory/Device.py
index 0cc4555d455bf28ac2143a5d58b87e084a8360c7..66c87b14dd866d44b5d48addf93d172aea962f8e 100644
--- a/src/common/tools/object_factory/Device.py
+++ b/src/common/tools/object_factory/Device.py
@@ -43,6 +43,9 @@ DEVICE_MICROWAVE_DRIVERS = [DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY]
 DEVICE_P4_TYPE      = DeviceTypeEnum.P4_SWITCH.value
 DEVICE_P4_DRIVERS   = [DeviceDriverEnum.DEVICEDRIVER_P4]
 
+DEVICE_TFS_TYPE    = DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value
+DEVICE_TFS_DRIVERS = [DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN]
+
 def json_device_id(device_uuid : str):
     return {'device_uuid': {'uuid': device_uuid}}
 
@@ -120,6 +123,13 @@ def json_device_p4_disabled(
     return json_device(
         device_uuid, DEVICE_P4_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, drivers=drivers)
 
+def json_device_tfs_disabled(
+        device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [],
+        drivers : List[Dict] = DEVICE_TFS_DRIVERS
+    ):
+    return json_device(
+        device_uuid, DEVICE_TFS_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, drivers=drivers)
+
 def json_device_connect_rules(address : str, port : int, settings : Dict = {}):
     return [
         json_config_rule_set('_connect/address',  address),
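
A hedged sketch combining the new TFS-controller factory with the existing connect-rule factory (address, port, and device UUID are illustrative values):

    from common.tools.object_factory.Device import (
        json_device_connect_rules, json_device_tfs_disabled)

    # Descriptor for a disabled TeraFlowSDN controller device, wired with the
    # management address/port it should be contacted at.
    config_rules = json_device_connect_rules('10.0.0.10', 80)
    device = json_device_tfs_disabled('tfs-controller-1', config_rules=config_rules)
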
diff --git a/src/common/type_checkers/Assertions.py b/src/common/type_checkers/Assertions.py
index c0442d8770c682ac1eea032980b58e7028be90c4..ba82e535ec958104bd14abf625eb6cd38c2a08ee 100644
--- a/src/common/type_checkers/Assertions.py
+++ b/src/common/type_checkers/Assertions.py
@@ -33,6 +33,7 @@ def validate_device_driver_enum(message):
         'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY',
         'DEVICEDRIVER_ONF_TR_352',
         'DEVICEDRIVER_XR',
+        'DEVICEDRIVER_IETF_L2VPN',
     ]
 
 def validate_device_operational_status_enum(message):
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py
index f95b532af4ba01968d17bc3958e1cffbf84a5e7f..ed25dbab3cd6b07ef73d64c5d37ad64e85353c02 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py
@@ -74,4 +74,18 @@ BEARER_MAPPINGS = {
     'R3:1/3': ('R3', '1/3', '5.3.1.3', None, 0, None, None, None, None),
     'R4:1/2': ('R4', '1/2', '5.4.1.2', None, 0, None, None, None, None),
     'R4:1/3': ('R4', '1/3', '5.4.1.3', None, 0, None, None, None, None),
+
+    # OFC'23
+    'PE1:1/1': ('PE1', '1/1', '10.1.1.1', None, 0, None, None, None, None),
+    'PE1:1/2': ('PE1', '1/2', '10.1.1.2', None, 0, None, None, None, None),
+    'PE2:1/1': ('PE2', '1/1', '10.2.1.1', None, 0, None, None, None, None),
+    'PE2:1/2': ('PE2', '1/2', '10.2.1.2', None, 0, None, None, None, None),
+    'PE3:1/1': ('PE3', '1/1', '10.3.1.1', None, 0, None, None, None, None),
+    'PE3:1/2': ('PE3', '1/2', '10.3.1.2', None, 0, None, None, None, None),
+    'PE4:1/1': ('PE4', '1/1', '10.4.1.1', None, 0, None, None, None, None),
+    'PE4:1/2': ('PE4', '1/2', '10.4.1.2', None, 0, None, None, None, None),
+
+    'R149:eth-1/0/22': ('R149', 'eth-1/0/22', '5.5.5.5', None, 0, None, None, '5.5.5.1', '100'),
+    'R155:eth-1/0/22': ('R155', 'eth-1/0/22', '5.5.5.1', None, 0, None, None, '5.5.5.5', '100'),
+    'R199:eth-1/0/21': ('R199', 'eth-1/0/21', '5.5.5.6', None, 0, None, None, '5.5.5.5', '100'),
 }
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
index f12c4526aec27a60e579540ecae90720c707a117..9a33cd2281d4dfd7a0a8dac964b9b35d7975cf28 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
@@ -17,7 +17,7 @@ from flask import request
 from flask.json import jsonify
 from flask_restful import Resource
 from common.proto.context_pb2 import SliceStatusEnum
-from common.tools.context_queries.Slice import get_slice
+from common.tools.context_queries.Slice import get_slice_by_uuid
 from context.client.ContextClient import ContextClient
 from slice.client.SliceClient import SliceClient
 from ..tools.Authentication import HTTP_AUTH
@@ -34,7 +34,7 @@ class L2VPN_Service(Resource):
         try:
             context_client = ContextClient()
 
-            target = get_slice(context_client, vpn_id, rw_copy=True)
+            target = get_slice_by_uuid(context_client, vpn_id, rw_copy=True)
             if target is None:
                 raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
 
@@ -59,7 +59,7 @@ class L2VPN_Service(Resource):
         try:
             context_client = ContextClient()
 
-            target = get_slice(context_client, vpn_id)
+            target = get_slice_by_uuid(context_client, vpn_id)
             if target is None:
                 LOGGER.warning('VPN({:s}) not found in database. Nothing done.'.format(str(vpn_id)))
             else:
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index ff7ad3c1481d3c0f3cdf7a6b6004f62677948ecc..7e829479a0a3dbd4968d488a22dc62219fa5376c 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -20,7 +20,7 @@ from flask.wrappers import Response
 from flask_restful import Resource
 from werkzeug.exceptions import UnsupportedMediaType
 from common.proto.context_pb2 import Slice
-from common.tools.context_queries.Slice import get_slice
+from common.tools.context_queries.Slice import get_slice_by_uuid
 from common.tools.grpc.ConfigRules import update_config_rule_custom
 from common.tools.grpc.Constraints import (
     update_constraint_custom_dict, update_constraint_endpoint_location, update_constraint_endpoint_priority,
@@ -68,7 +68,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s
         address_ip, address_prefix, remote_router, circuit_id
     ) = mapping
 
-    target = get_slice(context_client, vpn_id, rw_copy=True)
+    target = get_slice_by_uuid(context_client, vpn_id, rw_copy=True)
     if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
 
     endpoint_ids = target.slice_endpoint_ids        # pylint: disable=no-member
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_network_slice/NSS_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_network_slice/NSS_Service.py
index f9b17c8b144ad6cf477b978ceb50b3497c9c074b..32ee81e801e1d93a23a8a752aa6c63cfffe43a82 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_network_slice/NSS_Service.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_network_slice/NSS_Service.py
@@ -16,7 +16,7 @@ import logging
 from flask.json import jsonify
 from flask_restful import Resource
 from common.proto.context_pb2 import SliceStatusEnum
-from common.tools.context_queries.Slice import get_slice
+from common.tools.context_queries.Slice import get_slice_by_uuid
 from common.tools.grpc.Tools import grpc_message_to_json
 from context.client.ContextClient import ContextClient
 from slice.client.SliceClient import SliceClient
@@ -32,7 +32,7 @@ class NSS_Service(Resource):
         try:
             context_client = ContextClient()
 
-            target = get_slice(context_client, slice_id, rw_copy=True)
+            target = get_slice_by_uuid(context_client, slice_id, rw_copy=True)
             if target is None:
                 raise Exception('Slice({:s}) not found in database'.format(str(slice_id)))
 
@@ -56,7 +56,7 @@ class NSS_Service(Resource):
         LOGGER.debug('DELETE Slice ID: {:s}'.format(str(slice_id)))
         try:
             context_client = ContextClient()
-            target = get_slice(context_client, slice_id)
+            target = get_slice_by_uuid(context_client, slice_id)
 
             response = jsonify({})
             response.status_code = HTTP_OK
diff --git a/src/context/client/ContextClient.py b/src/context/client/ContextClient.py
index 7c3832d6b3ea7de0a495faee143b73179e8da5b9..13d9dc0035b45845bf11367e02c8830b5151c1d6 100644
--- a/src/context/client/ContextClient.py
+++ b/src/context/client/ContextClient.py
@@ -21,11 +21,11 @@ from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.proto.context_pb2 import (
     Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
     Context, ContextEvent, ContextId, ContextIdList, ContextList,
-    Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
+    Device, DeviceEvent, DeviceFilter, DeviceId, DeviceIdList, DeviceList,
     Empty, EndPointIdList, EndPointNameList,
     Link, LinkEvent, LinkId, LinkIdList, LinkList,
-    Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
-    Slice, SliceEvent, SliceId, SliceIdList, SliceList,
+    Service, ServiceEvent, ServiceFilter, ServiceId, ServiceIdList, ServiceList,
+    Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
     Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
 from common.proto.context_pb2_grpc import ContextServiceStub
 from common.proto.context_policy_pb2_grpc import ContextPolicyServiceStub
@@ -185,6 +185,13 @@ class ContextClient:
         LOGGER.debug('RemoveDevice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def SelectDevice(self, request: DeviceFilter) -> DeviceList:
+        LOGGER.debug('SelectDevice request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectDevice(request)
+        LOGGER.debug('SelectDevice result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def GetDeviceEvents(self, request: Empty) -> Iterator[DeviceEvent]:
         LOGGER.debug('GetDeviceEvents request: {:s}'.format(grpc_message_to_json_string(request)))
@@ -283,6 +290,13 @@ class ContextClient:
         LOGGER.debug('RemoveService result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def SelectService(self, request: ServiceFilter) -> ServiceList:
+        LOGGER.debug('SelectService request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectService(request)
+        LOGGER.debug('SelectService result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def GetServiceEvents(self, request: Empty) -> Iterator[ServiceEvent]:
         LOGGER.debug('GetServiceEvents request: {:s}'.format(grpc_message_to_json_string(request)))
@@ -332,6 +346,13 @@ class ContextClient:
         LOGGER.debug('RemoveSlice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def SelectSlice(self, request: SliceFilter) -> SliceList:
+        LOGGER.debug('SelectSlice request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectSlice(request)
+        LOGGER.debug('SelectSlice result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def GetSliceEvents(self, request: Empty) -> Iterator[SliceEvent]:
         LOGGER.debug('GetSliceEvents request: {:s}'.format(grpc_message_to_json_string(request)))
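
A minimal sketch of the new Select* RPCs, mirroring what common.tools.context_queries.Device.get_device() does internally (device UUID illustrative):

    from common.proto.context_pb2 import DeviceFilter
    from context.client.ContextClient import ContextClient

    context_client = ContextClient()
    device_filter = DeviceFilter()
    device_id = device_filter.device_ids.device_ids.add()
    device_id.device_uuid.uuid = 'R1'
    device_filter.include_endpoints = True
    device_filter.include_config_rules = False  # trim the reply payload
    device_filter.include_components = False
    devices = context_client.SelectDevice(device_filter)
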
diff --git a/src/context/data/sql_hash_join_full_scan_tests.sql b/src/context/data/sql_hash_join_full_scan_tests.sql
new file mode 100644
index 0000000000000000000000000000000000000000..ebead1be6cae62bce06ab9324c0381b9266eee9c
--- /dev/null
+++ b/src/context/data/sql_hash_join_full_scan_tests.sql
@@ -0,0 +1,119 @@
+-- When inserting config rules, for instance those related to a device:
+--   if we insert only a few rules (3~4 rows), the planner uses a lookup join; with more rows
+--   it falls back to a full scan plus hash join, which is less performant...
+--   To be investigated...
+
+-----------------------------------------------------------------------------------------------------
+-- Scenario: tests database with device and device_configrule tables
+
+CREATE DATABASE tests;
+USE tests;
+
+CREATE TYPE public.orm_deviceoperationalstatusenum AS ENUM ('UNDEFINED', 'DISABLED', 'ENABLED');
+CREATE TYPE public.orm_devicedriverenum AS ENUM ('UNDEFINED', 'OPENCONFIG', 'TRANSPORT_API', 'P4', 'IETF_NETWORK_TOPOLOGY', 'ONF_TR_352', 'XR', 'IETF_L2VPN');
+CREATE TYPE public.configrulekindenum AS ENUM ('CUSTOM', 'ACL');
+CREATE TYPE public.orm_configactionenum AS ENUM ('UNDEFINED', 'SET', 'DELETE');
+
+CREATE TABLE public.device (
+  device_uuid UUID NOT NULL,
+  device_name VARCHAR NOT NULL,
+  device_type VARCHAR NOT NULL,
+  device_operational_status public.orm_deviceoperationalstatusenum NOT NULL,
+  device_drivers public.orm_devicedriverenum[] NULL,
+  created_at TIMESTAMP NOT NULL,
+  updated_at TIMESTAMP NOT NULL,
+  CONSTRAINT device_pkey PRIMARY KEY (device_uuid ASC)
+);
+
+CREATE TABLE public.device_configrule (
+  configrule_uuid UUID NOT NULL,
+  device_uuid UUID NOT NULL,
+  "position" INT8 NOT NULL,
+  kind public.configrulekindenum NOT NULL,
+  action public.orm_configactionenum NOT NULL,
+  data VARCHAR NOT NULL,
+  created_at TIMESTAMP NOT NULL,
+  updated_at TIMESTAMP NOT NULL,
+  CONSTRAINT device_configrule_pkey PRIMARY KEY (configrule_uuid ASC),
+  CONSTRAINT device_configrule_device_uuid_fkey FOREIGN KEY (device_uuid) REFERENCES public.device(device_uuid) ON DELETE CASCADE,
+  INDEX device_configrule_device_uuid_rec_idx (device_uuid ASC) STORING ("position", kind, action, data, created_at, updated_at),
+  CONSTRAINT check_position_value CHECK ("position" >= 0:::INT8)
+);
+
+-----------------------------------------------------------------------------------------------------
+-- Populate devices
+
+INSERT INTO device (device_uuid, device_name, device_type, device_operational_status, device_drivers, created_at, updated_at) VALUES
+('a3645f8a-5f1f-4d91-8b11-af4104e57f52'::UUID, 'R1', 'router', 'ENABLED', ARRAY['UNDEFINED'], '2023-04-21 07:51:00.0', '2023-04-21 07:51:00.0'),
+('7c1e923c-145c-48c5-8016-0d1f596cb4c1'::UUID, 'R2', 'router', 'ENABLED', ARRAY['UNDEFINED'], '2023-04-21 07:51:00.0', '2023-04-21 07:51:00.0')
+ON CONFLICT (device_uuid) DO UPDATE SET
+device_name=excluded.device_name,
+device_operational_status=excluded.device_operational_status,
+updated_at=excluded.updated_at
+RETURNING device.created_at, device.updated_at;
+
+-----------------------------------------------------------------------------------------------------
+-- Examine insertion of config rules...
+
+-- Helpful commands:
+--   ANALYZE (VERBOSE, TYPES) <statement>
+--   EXPLAIN (VERBOSE, TYPES) <statement>
+
+-- Rows with realistic data
+EXPLAIN (TYPES, VERBOSE) INSERT INTO device_configrule (configrule_uuid, device_uuid, position, kind, action, data, created_at, updated_at) VALUES
+('5491b521-76a2-57c4-b622-829131374b4b', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 0, 'CUSTOM', 'SET', '{"resource_key": "_connect/address", "resource_value": "127.0.0.1"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('1f39fb84-2337-5735-a873-2bc7cd50bdd2', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 1, 'CUSTOM', 'SET', '{"resource_key": "_connect/port", "resource_value": "0"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('5d4c72f9-7acc-5ab2-a41e-0d4bc625943e', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 2, 'CUSTOM', 'SET', '{"resource_key": "_connect/settings", "resource_value": "{\\n\\"endpoints\\": [\\n{\\n\\"sample_types\\": [],\\n\\"type\\": \\"copper\\",\\n\\"uuid\\ ... (573 characters truncated) ... "type\\": \\"copper\\",\\n\\"uuid\\": \\"2/5\\"\\n},\\n{\\n\\"sample_types\\": [],\\n\\"type\\": \\"copper\\",\\n\\"uuid\\": \\"2/6\\"\\n}\\n]\\n}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('d14c3fc7-4998-5707-b1c4-073d553a86ef', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 3, 'CUSTOM', 'SET', '{"resource_key": "/endpoints/endpoint[1/1]", "resource_value": "{\\"sample_types\\": {}, \\"type\\": \\"copper\\", \\"uuid\\": \\"1/1\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('e3268fba-e695-59d0-b26e-014930f416fd', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 4, 'CUSTOM', 'SET', '{"resource_key": "/endpoints/endpoint[1/2]", "resource_value": "{\\"sample_types\\": {}, \\"type\\": \\"copper\\", \\"uuid\\": \\"1/2\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('043eb444-81c8-5ac9-83df-0c6a74bad534', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 5, 'CUSTOM', 'SET', '{"resource_key": "/endpoints/endpoint[1/3]", "resource_value": "{\\"sample_types\\": {}, \\"type\\": \\"copper\\", \\"uuid\\": \\"1/3\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('df79d0ea-bcda-57fc-8926-e9a9257628dd', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 6, 'CUSTOM', 'SET', '{"resource_key": "/endpoints/endpoint[2/1]", "resource_value": "{\\"sample_types\\": {}, \\"type\\": \\"copper\\", \\"uuid\\": \\"2/1\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('94f41adc-1041-5a8a-81f5-1224d884ae57', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 7, 'CUSTOM', 'SET', '{"resource_key": "/endpoints/endpoint[2/2]", "resource_value": "{\\"sample_types\\": {}, \\"type\\": \\"copper\\", \\"uuid\\": \\"2/2\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('b27c0207-dc43-59db-a856-74aaab4f1a19', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 8, 'CUSTOM', 'SET', '{"resource_key": "/endpoints/endpoint[2/3]", "resource_value": "{\\"sample_types\\": {}, \\"type\\": \\"copper\\", \\"uuid\\": \\"2/3\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('5902903f-0be4-5ec6-a133-c3e2f9ae05a6', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 9, 'CUSTOM', 'SET', '{"resource_key": "/endpoints/endpoint[2/4]", "resource_value": "{\\"sample_types\\": {}, \\"type\\": \\"copper\\", \\"uuid\\": \\"2/4\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('eb20a698-99f2-5369-a228-610cd289297a', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 10, 'CUSTOM', 'SET', '{"resource_key": "/endpoints/endpoint[2/5]", "resource_value": "{\\"sample_types\\": {}, \\"type\\": \\"copper\\", \\"uuid\\": \\"2/5\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('3a73b766-195c-59ec-a66e-d32391bc35a3', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 11, 'CUSTOM', 'SET', '{"resource_key": "/endpoints/endpoint[2/6]", "resource_value": "{\\"sample_types\\": {}, \\"type\\": \\"copper\\", \\"uuid\\": \\"2/6\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('353ba35c-5ec6-5f38-8ca9-e6c024772282', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 12, 'CUSTOM', 'SET', '{"resource_key": "/network_instance[ELAN-AC:126]", "resource_value": "{\\"name\\": \\"ELAN-AC:126\\", \\"type\\": \\"L2VSI\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('45582643-09a1-5ade-beac-2bf057549d38', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 13, 'CUSTOM', 'SET', '{"resource_key": "/interface[2/4.126]/subinterface[0]", "resource_value": "{\\"index\\": 0, \\"name\\": \\"2/4.126\\", \\"type\\": \\"l2vlan\\", \\"vlan_id\\": 26}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('9484afcd-b010-561e-ba83-6b0654d8816c', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 14, 'CUSTOM', 'SET', '{"resource_key": "/network_instance[ELAN-AC:126]/interface[2/4.126]", "resource_value": "{\\"id\\": \\"2/4.126\\", \\"interface\\": \\"2/4.126\\", \\"name\\": \\"ELAN-AC:126\\", \\"subinterface\\": 0}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('f1cc8161-eaee-5139-a3e5-207fbe11800d', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 15, 'CUSTOM', 'SET', '{"resource_key": "/network_instance[ELAN-AC:126]/connection_point[VC-1]", "resource_value": "{\\"VC_ID\\": \\"126\\", \\"connection_point\\": \\"VC-1\\", \\"name\\": \\"ELAN-AC:126\\", \\"remote_system\\": \\"10.0.0.6\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('f1836e1d-74a1-51be-944f-a2cedc297812', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 16, 'CUSTOM', 'SET', '{"resource_key": "/network_instance[ELAN-AC:128]", "resource_value": "{\\"name\\": \\"ELAN-AC:128\\", \\"type\\": \\"L2VSI\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('86f152ea-6abf-5bd4-b2c8-eddfe2826847', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 17, 'CUSTOM', 'SET', '{"resource_key": "/interface[1/2.128]/subinterface[0]", "resource_value": "{\\"index\\": 0, \\"name\\": \\"1/2.128\\", \\"type\\": \\"l2vlan\\", \\"vlan_id\\": 28}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('c36e9d88-0ee3-5826-9a27-3b25a7520121', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 18, 'CUSTOM', 'SET', '{"resource_key": "/network_instance[ELAN-AC:128]/interface[1/2.128]", "resource_value": "{\\"id\\": \\"1/2.128\\", \\"interface\\": \\"1/2.128\\", \\"name\\": \\"ELAN-AC:128\\", \\"subinterface\\": 0}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('a9d4c170-ca55-5969-8329-5bbbceec5bd6', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 19, 'CUSTOM', 'SET', '{"resource_key": "/network_instance[ELAN-AC:128]/connection_point[VC-1]", "resource_value": "{\\"VC_ID\\": \\"128\\", \\"connection_point\\": \\"VC-1\\", \\"name\\": \\"ELAN-AC:128\\", \\"remote_system\\": \\"10.0.0.3\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('d86ceb87-e87a-5b1d-b3d9-15af96233500', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 20, 'CUSTOM', 'SET', '{"resource_key": "/network_instance[ELAN-AC:136]", "resource_value": "{\\"name\\": \\"ELAN-AC:136\\", \\"type\\": \\"L2VSI\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('cef724a0-3a51-5dd7-b9e5-bde174f4a8d2', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 21, 'CUSTOM', 'SET', '{"resource_key": "/interface[2/6.136]/subinterface[0]", "resource_value": "{\\"index\\": 0, \\"name\\": \\"2/6.136\\", \\"type\\": \\"l2vlan\\", \\"vlan_id\\": 36}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('9a01ca29-4ef6-50f3-84f0-a219e7c1689e', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 22, 'CUSTOM', 'SET', '{"resource_key": "/network_instance[ELAN-AC:136]/interface[2/6.136]", "resource_value": "{\\"id\\": \\"2/6.136\\", \\"interface\\": \\"2/6.136\\", \\"name\\": \\"ELAN-AC:136\\", \\"subinterface\\": 0}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('9212773d-6a4f-5cce-ae5b-85adfdf6674e', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 23, 'CUSTOM', 'SET', '{"resource_key": "/network_instance[ELAN-AC:136]/connection_point[VC-1]", "resource_value": "{\\"VC_ID\\": \\"136\\", \\"connection_point\\": \\"VC-1\\", \\"name\\": \\"ELAN-AC:136\\", \\"remote_system\\": \\"10.0.0.2\\"}"}', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892')
+ON CONFLICT (configrule_uuid) DO UPDATE SET position = excluded.position, action = excluded.action, data = excluded.data, updated_at = excluded.updated_at
+RETURNING device_configrule.created_at, device_configrule.updated_at;
+
+
+-- Rows with empty data (the planner still does a full scan and hash join)
+-- If only 3~4 rows are inserted, it does a lookup join...
+EXPLAIN INSERT INTO device_configrule (configrule_uuid, device_uuid, position, kind, action, data, created_at, updated_at) VALUES
+('5491b521-76a2-57c4-b622-829131374b4b', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 0, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('1f39fb84-2337-5735-a873-2bc7cd50bdd2', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 1, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('5d4c72f9-7acc-5ab2-a41e-0d4bc625943e', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 2, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('d14c3fc7-4998-5707-b1c4-073d553a86ef', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 3, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('e3268fba-e695-59d0-b26e-014930f416fd', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 4, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('043eb444-81c8-5ac9-83df-0c6a74bad534', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 5, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('df79d0ea-bcda-57fc-8926-e9a9257628dd', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 6, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('94f41adc-1041-5a8a-81f5-1224d884ae57', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 7, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('b27c0207-dc43-59db-a856-74aaab4f1a19', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 8, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('5902903f-0be4-5ec6-a133-c3e2f9ae05a6', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 9, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('eb20a698-99f2-5369-a228-610cd289297a', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 10, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('3a73b766-195c-59ec-a66e-d32391bc35a3', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 11, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('353ba35c-5ec6-5f38-8ca9-e6c024772282', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 12, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('45582643-09a1-5ade-beac-2bf057549d38', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 13, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('9484afcd-b010-561e-ba83-6b0654d8816c', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 14, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('f1cc8161-eaee-5139-a3e5-207fbe11800d', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 15, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('f1836e1d-74a1-51be-944f-a2cedc297812', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 16, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('86f152ea-6abf-5bd4-b2c8-eddfe2826847', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 17, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('c36e9d88-0ee3-5826-9a27-3b25a7520121', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 18, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('a9d4c170-ca55-5969-8329-5bbbceec5bd6', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 19, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('d86ceb87-e87a-5b1d-b3d9-15af96233500', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 20, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('cef724a0-3a51-5dd7-b9e5-bde174f4a8d2', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 21, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('9a01ca29-4ef6-50f3-84f0-a219e7c1689e', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 22, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892'),
+('9212773d-6a4f-5cce-ae5b-85adfdf6674e', 'a3645f8a-5f1f-4d91-8b11-af4104e57f52', 23, 'CUSTOM', 'SET', '', '2023-04-20 17:33:54.044892', '2023-04-20 17:33:54.044892')
+ON CONFLICT (configrule_uuid) DO UPDATE SET position = excluded.position, action = excluded.action, data = excluded.data, updated_at = excluded.updated_at
+RETURNING device_configrule.created_at, device_configrule.updated_at;
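The plans above come from pasting literal SQL into a CockroachDB shell. One way to produce that literal SQL from the ORM code changed below is to compile the statement with inlined parameters, as the commented-out debug lines in ConfigRule.py hint at. A minimal sketch, assuming the DeviceConfigRuleModel and rules_to_upsert names introduced by this patch are in scope:

```python
# Minimal sketch: render the SQLAlchemy upsert as literal SQL, so it can be
# prefixed with EXPLAIN in a CockroachDB shell as in the scratch file above.
# Assumes DeviceConfigRuleModel and rules_to_upsert (from this patch) in scope.
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import insert

stmt = insert(DeviceConfigRuleModel).values(rules_to_upsert)
stmt = stmt.on_conflict_do_update(
    index_elements=[DeviceConfigRuleModel.configrule_uuid],
    set_=dict(
        position   = stmt.excluded.position,
        action     = stmt.excluded.action,
        data       = stmt.excluded.data,
        updated_at = stmt.excluded.updated_at,
    ),
).returning(DeviceConfigRuleModel.created_at, DeviceConfigRuleModel.updated_at)

# literal_binds inlines the bound values, so the output can be pasted verbatim
# after the EXPLAIN keyword.
print(stmt.compile(dialect=postgresql.dialect(), compile_kwargs={'literal_binds': True}))
```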
diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py
index 6fe00f917cf8b338f0934e2a268fa757d2055865..6d540b4945df8516697c957316294a452186ddb1 100644
--- a/src/context/service/ContextServiceServicerImpl.py
+++ b/src/context/service/ContextServiceServicerImpl.py
@@ -18,11 +18,11 @@ from common.message_broker.MessageBroker import MessageBroker
 from common.proto.context_pb2 import (
     Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
     Context, ContextEvent, ContextId, ContextIdList, ContextList,
-    Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
+    Device, DeviceEvent, DeviceFilter, DeviceId, DeviceIdList, DeviceList,
     Empty, EndPointIdList, EndPointNameList, EventTypeEnum,
     Link, LinkEvent, LinkId, LinkIdList, LinkList,
-    Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
-    Slice, SliceEvent, SliceId, SliceIdList, SliceList,
+    Service, ServiceEvent, ServiceFilter, ServiceId, ServiceIdList, ServiceList,
+    Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
     Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
 from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule
 from common.proto.context_pb2_grpc import ContextServiceServicer
@@ -31,13 +31,13 @@ from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_m
 from .database.Connection import (
     connection_delete, connection_get, connection_list_ids, connection_list_objs, connection_set)
 from .database.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set
-from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_set
+from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_select, device_set
 from .database.EndPoint import endpoint_list_names
 from .database.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set
 from .database.PolicyRule import (
     policyrule_delete, policyrule_get, policyrule_list_ids, policyrule_list_objs, policyrule_set)
-from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set
-from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_set, slice_unset
+from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_select, service_set, service_unset
+from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_select, slice_set, slice_unset
 from .database.Topology import (
     topology_delete, topology_get, topology_get_details, topology_list_ids, topology_list_objs, topology_set)
 from .Events import (
@@ -161,6 +161,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
             notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id})
         return Empty()
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectDevice(self, request : DeviceFilter, context : grpc.ServicerContext) -> DeviceList:
+        return DeviceList(devices=device_select(self.db_engine, request))
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetDeviceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
         for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT):
@@ -227,6 +231,14 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
         notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id})
         return ServiceId(**service_id)
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def UnsetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId:
+        service_id,updated = service_unset(self.db_engine, request)
+        if updated:
+            event_type = EventTypeEnum.EVENTTYPE_UPDATE
+            notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id})
+        return ServiceId(**service_id)
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def RemoveService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty:
         service_id,deleted = service_delete(self.db_engine, request)
@@ -235,6 +247,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
             notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id})
         return Empty()
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectService(self, request : ServiceFilter, context : grpc.ServicerContext) -> ServiceList:
+        return ServiceList(services=service_select(self.db_engine, request))
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
         for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT):
@@ -278,6 +294,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
             notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id})
         return Empty()
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectSlice(self, request : SliceFilter, context : grpc.ServicerContext) -> SliceList:
+        return SliceList(slices=slice_select(self.db_engine, request))
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetSliceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]:
         for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT):
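The three Select* RPCs added here give callers filtered, partially-populated objects instead of full dumps. A minimal client-side sketch using the generated stub; the endpoint address and UUID are illustrative, and a real deployment would normally go through the ContextClient wrapper:

```python
# Sketch: invoking the new SelectDevice RPC through the generated gRPC stub.
# Address and UUID are illustrative placeholders.
import grpc
from common.proto.context_pb2 import DeviceFilter, DeviceId, Uuid
from common.proto.context_pb2_grpc import ContextServiceStub

channel = grpc.insecure_channel('127.0.0.1:1010')
stub = ContextServiceStub(channel)

device_filter = DeviceFilter()
device_filter.device_ids.device_ids.append(
    DeviceId(device_uuid=Uuid(uuid='a3645f8a-5f1f-4d91-8b11-af4104e57f52')))
device_filter.include_endpoints    = True    # load + dump endpoints
device_filter.include_config_rules = False   # skip config rules entirely
device_filter.include_components   = False

reply = stub.SelectDevice(device_filter)     # DeviceList with only the requested parts
for device in reply.devices:
    print(device.device_id.device_uuid.uuid, device.name)
```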
diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py
index 09723cc6f6b31e2496bf5ab475f50d0aa58f95c2..c5b259a2dfc2ba684f6881dfb2a9a79b3a36032a 100644
--- a/src/context/service/database/ConfigRule.py
+++ b/src/context/service/database/ConfigRule.py
@@ -21,7 +21,8 @@ from typing import Dict, List, Optional, Set
 from common.proto.context_pb2 import ConfigRule
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from .models.enums.ConfigAction import ORM_ConfigActionEnum, grpc_to_enum__config_action
-from .models.ConfigRuleModel import ConfigRuleKindEnum, ConfigRuleModel
+from .models.ConfigRuleModel import (
+    ConfigRuleKindEnum, DeviceConfigRuleModel, ServiceConfigRuleModel, SliceConfigRuleModel)
 from .uuids._Builder import get_uuid_from_string
 from .uuids.EndPoint import endpoint_get_uuid
 
@@ -83,6 +84,16 @@ def upsert_config_rules(
     session : Session, config_rules : List[Dict], is_delete : bool = False,
     device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None,
 ) -> bool:
+    if device_uuid is not None and service_uuid is None and slice_uuid is None:
+        klass = DeviceConfigRuleModel
+    elif device_uuid is None and service_uuid is not None and slice_uuid is None:
+        klass = ServiceConfigRuleModel
+    elif device_uuid is None and service_uuid is None and slice_uuid is not None:
+        klass = SliceConfigRuleModel
+    else:
+        MSG = 'DataModel cannot be identified (device_uuid={:s}, service_uuid={:s}, slice_uuid={:s})'
+        raise Exception(MSG.format(str(device_uuid), str(service_uuid), str(slice_uuid)))
+
     uuids_to_delete : Set[str] = set()
     uuids_to_upsert : Dict[str, int] = dict()
     rules_to_upsert : List[Dict] = list()
@@ -108,11 +119,11 @@ def upsert_config_rules(
 
     delete_affected = False
     if len(uuids_to_delete) > 0:
-        stmt = delete(ConfigRuleModel)
-        if device_uuid  is not None: stmt = stmt.where(ConfigRuleModel.device_uuid  == device_uuid )
-        if service_uuid is not None: stmt = stmt.where(ConfigRuleModel.service_uuid == service_uuid)
-        if slice_uuid   is not None: stmt = stmt.where(ConfigRuleModel.slice_uuid   == slice_uuid  )
-        stmt = stmt.where(ConfigRuleModel.configrule_uuid.in_(uuids_to_delete))
+        stmt = delete(klass)
+        if device_uuid  is not None: stmt = stmt.where(klass.device_uuid  == device_uuid )
+        if service_uuid is not None: stmt = stmt.where(klass.service_uuid == service_uuid)
+        if slice_uuid   is not None: stmt = stmt.where(klass.slice_uuid   == slice_uuid  )
+        stmt = stmt.where(klass.configrule_uuid.in_(uuids_to_delete))
         #str_stmt = stmt.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True})
         #LOGGER.warning('delete stmt={:s}'.format(str(str_stmt)))
         configrule_deletes = session.execute(stmt)
@@ -121,9 +132,9 @@ def upsert_config_rules(
 
     upsert_affected = False
     if len(rules_to_upsert) > 0:
-        stmt = insert(ConfigRuleModel).values(rules_to_upsert)
+        stmt = insert(klass).values(rules_to_upsert)
         stmt = stmt.on_conflict_do_update(
-            index_elements=[ConfigRuleModel.configrule_uuid],
+            index_elements=[klass.configrule_uuid],
             set_=dict(
                 position   = stmt.excluded.position,
                 action     = stmt.excluded.action,
@@ -131,7 +142,7 @@ def upsert_config_rules(
                 updated_at = stmt.excluded.updated_at,
             )
         )
-        stmt = stmt.returning(ConfigRuleModel.created_at, ConfigRuleModel.updated_at)
+        stmt = stmt.returning(klass.created_at, klass.updated_at)
         #str_stmt = stmt.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True})
         #LOGGER.warning('upsert stmt={:s}'.format(str(str_stmt)))
         configrule_updates = session.execute(stmt).fetchall()
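Since config rules now live in three parent-specific tables, upsert_config_rules requires exactly one of the parent UUIDs; passing none, or more than one, raises. A quick illustration of the contract (names from this patch; session and UUIDs assumed in scope):

```python
# Contract of the per-parent dispatch above.
from context.service.database.ConfigRule import upsert_config_rules

upsert_config_rules(session, rules, device_uuid=device_uuid)    # -> device_configrule table
upsert_config_rules(session, rules, service_uuid=service_uuid)  # -> service_configrule table
upsert_config_rules(session, rules, slice_uuid=slice_uuid)      # -> slice_configrule table

upsert_config_rules(session, rules)                             # raises: no parent given
upsert_config_rules(session, rules,
    device_uuid=device_uuid, service_uuid=service_uuid)         # raises: ambiguous parent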
diff --git a/src/context/service/database/Connection.py b/src/context/service/database/Connection.py
index a3edb8ea2838d9203a810677da495893a2cd6973..80d3b3a6d437986741ee5308205d8a902e897c40 100644
--- a/src/context/service/database/Connection.py
+++ b/src/context/service/database/Connection.py
@@ -16,7 +16,7 @@ import datetime, logging, re
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
 from sqlalchemy.exc import IntegrityError
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
 from common.proto.context_pb2 import Connection, ConnectionId, ServiceId
@@ -40,7 +40,11 @@ def connection_list_ids(db_engine : Engine, request : ServiceId) -> List[Dict]:
 def connection_list_objs(db_engine : Engine, request : ServiceId) -> List[Dict]:
     _,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[ConnectionModel] = session.query(ConnectionModel).filter_by(service_uuid=service_uuid).all()
+        obj_list : List[ConnectionModel] = session.query(ConnectionModel)\
+            .options(selectinload(ConnectionModel.connection_service))\
+            .options(selectinload(ConnectionModel.connection_endpoints))\
+            .options(selectinload(ConnectionModel.connection_subservices))\
+            .filter_by(service_uuid=service_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
@@ -48,6 +52,9 @@ def connection_get(db_engine : Engine, request : ConnectionId) -> Dict:
     connection_uuid = connection_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[ConnectionModel] = session.query(ConnectionModel)\
+            .options(selectinload(ConnectionModel.connection_service))\
+            .options(selectinload(ConnectionModel.connection_endpoints))\
+            .options(selectinload(ConnectionModel.connection_subservices))\
             .filter_by(connection_uuid=connection_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
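This eager-loading pattern recurs through the rest of the patch: every query that ends in obj.dump() now pre-loads the relationships dump() touches, because run_transaction closes the session before the dicts are consumed, and a lazy load at that point would raise DetachedInstanceError (while inside the session it would cost one extra query per row). A self-contained sketch of the idea, with illustrative stand-in models:

```python
# Self-contained sketch of lazy loading vs selectinload; Parent/Child are
# illustrative stand-ins, not models from this repository.
from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.orm import declarative_base, relationship, selectinload, sessionmaker

_Base = declarative_base()

class Parent(_Base):
    __tablename__ = 'parent'
    id       = Column(Integer, primary_key=True)
    children = relationship('Child')

class Child(_Base):
    __tablename__ = 'child'
    id        = Column(Integer, primary_key=True)
    parent_id = Column(ForeignKey('parent.id'), nullable=False)

engine = create_engine('sqlite://')
_Base.metadata.create_all(engine)
with sessionmaker(bind=engine)() as session:
    # Default (lazy) loading: one extra SELECT per parent when .children is
    # first touched, and DetachedInstanceError once the session is closed.
    # selectinload instead issues one batched SELECT ... WHERE parent_id IN (...):
    parents = session.query(Parent).options(selectinload(Parent.children)).all()
    dumped = [[c.id for c in p.children] for p in parents]  # no lazy loads triggered
```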
diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py
index 3a73f6589f9332aa4c84f8f296f2cb56db3048bf..592d7f4c545a222092ca95924afafa69d2798d7c 100644
--- a/src/context/service/database/Constraint.py
+++ b/src/context/service/database/Constraint.py
@@ -20,7 +20,7 @@ from sqlalchemy.orm import Session
 from typing import Dict, List, Optional
 from common.proto.context_pb2 import Constraint
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from .models.ConstraintModel import ConstraintKindEnum, ConstraintModel
+from .models.ConstraintModel import ConstraintKindEnum, ServiceConstraintModel, SliceConstraintModel
 from .uuids._Builder import get_uuid_from_string
 from .uuids.EndPoint import endpoint_get_uuid
 
@@ -84,6 +84,14 @@ def upsert_constraints(
     session : Session, constraints : List[Dict], is_delete : bool = False,
     service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None
 ) -> bool:
+    if service_uuid is not None and slice_uuid is None:
+        klass = ServiceConstraintModel
+    elif service_uuid is None and slice_uuid is not None:
+        klass = SliceConstraintModel
+    else:
+        MSG = 'DataModel cannot be identified (service_uuid={:s}, slice_uuid={:s})'
+        raise Exception(MSG.format(str(service_uuid), str(slice_uuid)))
+
     uuids_to_upsert : Dict[str, int] = dict()
     rules_to_upsert : List[Dict] = list()
     for constraint in constraints:
@@ -100,10 +108,10 @@ def upsert_constraints(
     # Delete all constraints not in uuids_to_upsert
     delete_affected = False
     if len(uuids_to_upsert) > 0:
-        stmt = delete(ConstraintModel)
-        if service_uuid is not None: stmt = stmt.where(ConstraintModel.service_uuid == service_uuid)
-        if slice_uuid   is not None: stmt = stmt.where(ConstraintModel.slice_uuid   == slice_uuid  )
-        stmt = stmt.where(ConstraintModel.constraint_uuid.not_in(set(uuids_to_upsert.keys())))
+        stmt = delete(klass)
+        if service_uuid is not None: stmt = stmt.where(klass.service_uuid == service_uuid)
+        if slice_uuid   is not None: stmt = stmt.where(klass.slice_uuid   == slice_uuid  )
+        stmt = stmt.where(klass.constraint_uuid.not_in(set(uuids_to_upsert.keys())))
         #str_stmt = stmt.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True})
         #LOGGER.warning('delete stmt={:s}'.format(str(str_stmt)))
         constraint_deletes = session.execute(stmt)
@@ -112,16 +120,16 @@ def upsert_constraints(
 
     upsert_affected = False
     if not is_delete and len(constraints) > 0:
-        stmt = insert(ConstraintModel).values(constraints)
+        stmt = insert(klass).values(constraints)
         stmt = stmt.on_conflict_do_update(
-            index_elements=[ConstraintModel.constraint_uuid],
+            index_elements=[klass.constraint_uuid],
             set_=dict(
                 position   = stmt.excluded.position,
                 data       = stmt.excluded.data,
                 updated_at = stmt.excluded.updated_at,
             )
         )
-        stmt = stmt.returning(ConstraintModel.created_at, ConstraintModel.updated_at)
+        stmt = stmt.returning(klass.created_at, klass.updated_at)
         #str_stmt = stmt.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True})
         #LOGGER.warning('upsert stmt={:s}'.format(str(str_stmt)))
         constraint_updates = session.execute(stmt).fetchall()
diff --git a/src/context/service/database/Context.py b/src/context/service/database/Context.py
index 9e05e54b38d3772ece2d87de0d98fb5a216088de..4654095034749e1de985705b242ba9fa05a82f6a 100644
--- a/src/context/service/database/Context.py
+++ b/src/context/service/database/Context.py
@@ -15,7 +15,7 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
 from common.proto.context_pb2 import Context, ContextId
@@ -34,14 +34,22 @@ def context_list_ids(db_engine : Engine) -> List[Dict]:
 
 def context_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[ContextModel] = session.query(ContextModel).all()
+        obj_list : List[ContextModel] = session.query(ContextModel)\
+            .options(selectinload(ContextModel.topologies))\
+            .options(selectinload(ContextModel.services))\
+            .options(selectinload(ContextModel.slices))\
+            .all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def context_get(db_engine : Engine, request : ContextId) -> Dict:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[ContextModel] = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none()
+        obj : Optional[ContextModel] = session.query(ContextModel)\
+            .options(selectinload(ContextModel.topologies))\
+            .options(selectinload(ContextModel.services))\
+            .options(selectinload(ContextModel.slices))\
+            .filter_by(context_uuid=context_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py
index c5a19c9c4b0bca4f85ffe1211dbefc6b218d518e..3e106bc158ab804c7eada7284e9d1b883eb66264 100644
--- a/src/context/service/database/Device.py
+++ b/src/context/service/database/Device.py
@@ -15,12 +15,12 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
-from common.proto.context_pb2 import Device, DeviceId, TopologyId
-from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.proto.context_pb2 import Device, DeviceFilter, DeviceId, TopologyId
+#from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Device import json_device_id
 from context.service.database.uuids.Topology import topology_get_uuid
 from .models.DeviceModel import DeviceModel
@@ -43,14 +43,22 @@ def device_list_ids(db_engine : Engine) -> List[Dict]:
 
 def device_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[DeviceModel] = session.query(DeviceModel).all()
+        obj_list : List[DeviceModel] = session.query(DeviceModel)\
+            .options(selectinload(DeviceModel.endpoints))\
+            .options(selectinload(DeviceModel.config_rules))\
+            .all()
+            #.options(selectinload(DeviceModel.components))\
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def device_get(db_engine : Engine, request : DeviceId) -> Dict:
     device_uuid = device_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[DeviceModel] = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none()
+        obj : Optional[DeviceModel] = session.query(DeviceModel)\
+            .options(selectinload(DeviceModel.endpoints))\
+            .options(selectinload(DeviceModel.config_rules))\
+            .filter_by(device_uuid=device_uuid).one_or_none()
+            #.options(selectinload(DeviceModel.components))\
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -163,7 +171,9 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]:
             endpoint_updates = session.execute(stmt).fetchall()
             updated_endpoints = any([(updated_at > created_at) for created_at,updated_at in endpoint_updates])
 
-        if len(related_topologies) > 0:
+        if not updated or len(related_topologies) > 1:
+            # Only update topology-device relations when the device is created (not updated) or when its
+            # endpoints are modified (len(related_topologies) > 1).
             session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing(
                 index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid]
             ))
@@ -182,3 +192,22 @@ def device_delete(db_engine : Engine, request : DeviceId) -> Tuple[Dict, bool]:
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_device_id(device_uuid),deleted
+
+def device_select(db_engine : Engine, request : DeviceFilter) -> List[Dict]:
+    device_uuids = [
+        device_get_uuid(device_id, allow_random=False)
+        for device_id in request.device_ids.device_ids
+    ]
+    dump_params = dict(
+        include_endpoints   =request.include_endpoints,
+        include_config_rules=request.include_config_rules,
+        include_components  =request.include_components,
+    )
+    def callback(session : Session) -> List[Dict]:
+        query = session.query(DeviceModel)
+        if request.include_endpoints   : query = query.options(selectinload(DeviceModel.endpoints))
+        if request.include_config_rules: query = query.options(selectinload(DeviceModel.config_rules))
+        #if request.include_components  : query = query.options(selectinload(DeviceModel.components))
+        obj_list : List[DeviceModel] = query.filter(DeviceModel.device_uuid.in_(device_uuids)).all()
+        return [obj.dump(**dump_params) for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
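device_select mirrors the filter flags twice: selectinload fetches only the requested relations, and the same flags are forwarded to DeviceModel.dump so skipped relations are omitted from the dict rather than dumped empty. Roughly, for a filter with only include_endpoints set (all values below are made up):

```python
# Illustrative shape of device_select(db_engine, device_filter) output when only
# include_endpoints is True; keys follow DeviceModel.dump() further below.
[{
    'device_id'                : {'device_uuid': {'uuid': 'a3645f8a-5f1f-4d91-8b11-af4104e57f52'}},
    'name'                     : 'R1',                  # illustrative
    'device_type'              : 'emu-packet-router',   # illustrative
    'device_operational_status': 1,
    'device_drivers'           : [0],
    'device_endpoints'         : [],                    # present: include_endpoints=True
    # 'device_config' and 'components' are absent: their flags were False
}]
```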
diff --git a/src/context/service/database/EndPoint.py b/src/context/service/database/EndPoint.py
index e2f86893abdf62c9675a83b2a80ceed1227b85d4..b0df3bb8101a7b64a148e916178b1c9a77d511af 100644
--- a/src/context/service/database/EndPoint.py
+++ b/src/context/service/database/EndPoint.py
@@ -14,7 +14,7 @@
 
 import logging
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List
 from common.proto.context_pb2 import EndPointIdList
@@ -29,7 +29,8 @@ def endpoint_list_names(db_engine : Engine, request : EndPointIdList) -> List[Di
         for endpoint_id in request.endpoint_ids
     }
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[EndPointModel] = \
-            session.query(EndPointModel).filter(EndPointModel.endpoint_uuid.in_(endpoint_uuids)).all()
+        obj_list : List[EndPointModel] = session.query(EndPointModel)\
+            .options(selectinload(EndPointModel.device))\
+            .filter(EndPointModel.endpoint_uuid.in_(endpoint_uuids)).all()
         return [obj.dump_name() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py
index 8d195cb1d548368c4b1d55f70a3d728ee6fd052e..f5bfc9dea5fb81fa8becfedc8ce1e4e0f59e7292 100644
--- a/src/context/service/database/Link.py
+++ b/src/context/service/database/Link.py
@@ -15,7 +15,7 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.proto.context_pb2 import Link, LinkId
@@ -36,14 +36,18 @@ def link_list_ids(db_engine : Engine) -> List[Dict]:
 
 def link_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[LinkModel] = session.query(LinkModel).all()
+        obj_list : List[LinkModel] = session.query(LinkModel)\
+            .options(selectinload(LinkModel.link_endpoints))\
+            .all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def link_get(db_engine : Engine, request : LinkId) -> Dict:
     link_uuid = link_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[LinkModel] = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none()
+        obj : Optional[LinkModel] = session.query(LinkModel)\
+            .options(selectinload(LinkModel.link_endpoints))\
+            .filter_by(link_uuid=link_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -64,13 +68,14 @@ def link_set(db_engine : Engine, request : Link) -> Tuple[Dict, bool]:
     topology_uuids : Set[str] = set()
     related_topologies : List[Dict] = list()
     link_endpoints_data : List[Dict] = list()
-    for endpoint_id in request.link_endpoint_ids:
+    for i,endpoint_id in enumerate(request.link_endpoint_ids):
         endpoint_topology_uuid, _, endpoint_uuid = endpoint_get_uuid(
             endpoint_id, allow_random=False)
 
         link_endpoints_data.append({
             'link_uuid'    : link_uuid,
             'endpoint_uuid': endpoint_uuid,
+            'position'     : i,
         })
 
         if endpoint_topology_uuid not in topology_uuids:
diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py
index e95cec4ae533795b23b8fd4e2f26ac9000c1bcce..13f0a2698c17874e1e15f4d6a1d527d366141f56 100644
--- a/src/context/service/database/PolicyRule.py
+++ b/src/context/service/database/PolicyRule.py
@@ -15,7 +15,7 @@
 import datetime, json
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.proto.policy_pb2 import PolicyRule, PolicyRuleId, PolicyRuleIdList, PolicyRuleList
@@ -31,14 +31,15 @@ from .uuids.Service import service_get_uuid
 def policyrule_list_ids(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all()
-        #.options(selectinload(PolicyRuleModel.topology)).filter_by(context_uuid=context_uuid).one_or_none()
         return [obj.dump_id() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def policyrule_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all()
-        #.options(selectinload(PolicyRuleModel.topology)).filter_by(context_uuid=context_uuid).one_or_none()
+        obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel)\
+            .options(selectinload(PolicyRuleModel.policyrule_service))\
+            .options(selectinload(PolicyRuleModel.policyrule_devices))\
+            .all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
@@ -46,6 +47,8 @@ def policyrule_get(db_engine : Engine, request : PolicyRuleId) -> PolicyRule:
     policyrule_uuid = policyrule_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[PolicyRuleModel] = session.query(PolicyRuleModel)\
+            .options(selectinload(PolicyRuleModel.policyrule_service))\
+            .options(selectinload(PolicyRuleModel.policyrule_devices))\
             .filter_by(policyrule_uuid=policyrule_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py
index a81a80c3c2398fed16842bcc3d8aa16342edb72b..b6916dc3a19fef4bde3aff93300e63f360b362c0 100644
--- a/src/context/service/database/Service.py
+++ b/src/context/service/database/Service.py
@@ -13,12 +13,13 @@
 # limitations under the License.
 
 import datetime, logging
+from sqlalchemy import and_
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
-from typing import Dict, List, Optional, Tuple
-from common.proto.context_pb2 import ContextId, Service, ServiceId
+from typing import Dict, List, Optional, Set, Tuple
+from common.proto.context_pb2 import ContextId, Service, ServiceFilter, ServiceId
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Service import json_service_id
@@ -43,14 +44,22 @@ def service_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
 def service_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all()
+        obj_list : List[ServiceModel] = session.query(ServiceModel)\
+            .options(selectinload(ServiceModel.service_endpoints))\
+            .options(selectinload(ServiceModel.constraints))\
+            .options(selectinload(ServiceModel.config_rules))\
+            .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def service_get(db_engine : Engine, request : ServiceId) -> Dict:
     _,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[ServiceModel] = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none()
+        obj : Optional[ServiceModel] = session.query(ServiceModel)\
+            .options(selectinload(ServiceModel.service_endpoints))\
+            .options(selectinload(ServiceModel.constraints))\
+            .options(selectinload(ServiceModel.config_rules))\
+            .filter_by(service_uuid=service_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -91,6 +100,7 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]:
         service_endpoints_data.append({
             'service_uuid' : service_uuid,
             'endpoint_uuid': endpoint_uuid,
+            'position'     : i,
         })
 
     constraints = compose_constraints_data(request.service_constraints, now, service_uuid=service_uuid)
@@ -137,6 +147,45 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]:
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_service_id(service_uuid, json_context_id(context_uuid)),updated
 
+def service_unset(db_engine : Engine, request : Service) -> Tuple[Dict, bool]:
+    raw_context_uuid = request.service_id.context_id.context_uuid.uuid
+    raw_service_uuid = request.service_id.service_uuid.uuid
+    raw_service_name = request.name
+    service_name = raw_service_uuid if len(raw_service_name) == 0 else raw_service_name
+    context_uuid,service_uuid = service_get_uuid(request.service_id, service_name=service_name, allow_random=False)
+
+    service_endpoint_uuids : Set[str] = set()
+    for i,endpoint_id in enumerate(request.service_endpoint_ids):
+        endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+        if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid
+        if endpoint_context_uuid not in {raw_context_uuid, context_uuid}:
+            raise InvalidArgumentException(
+                'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
+                endpoint_context_uuid,
+                ['should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(raw_context_uuid)])
+        service_endpoint_uuids.add(endpoint_get_uuid(endpoint_id, allow_random=False)[2])
+
+    now = datetime.datetime.utcnow()
+    constraints = compose_constraints_data(request.service_constraints, now, service_uuid=service_uuid)
+    config_rules = compose_config_rules_data(request.service_config.config_rules, now, service_uuid=service_uuid)
+
+    def callback(session : Session) -> bool:
+        num_deletes = 0
+        if len(service_endpoint_uuids) > 0:
+            num_deletes += session.query(ServiceEndPointModel)\
+                .filter(and_(
+                    ServiceEndPointModel.service_uuid == service_uuid,
+                    ServiceEndPointModel.endpoint_uuid.in_(service_endpoint_uuids)
+                )).delete()
+
+        changed_constraints = upsert_constraints(session, constraints, is_delete=True, service_uuid=service_uuid)
+        changed_config_rules = upsert_config_rules(session, config_rules, is_delete=True, service_uuid=service_uuid)
+
+        return num_deletes > 0 or changed_constraints or changed_config_rules
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_service_id(service_uuid, json_context_id(context_uuid)),updated
+
 def service_delete(db_engine : Engine, request : ServiceId) -> Tuple[Dict, bool]:
     context_uuid,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> bool:
@@ -144,3 +193,22 @@ def service_delete(db_engine : Engine, request : ServiceId) -> Tuple[Dict, bool]
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_service_id(service_uuid, json_context_id(context_uuid)),deleted
+
+def service_select(db_engine : Engine, request : ServiceFilter) -> List[Dict]:
+    service_uuids = [
+        service_get_uuid(service_id, allow_random=False)[1]
+        for service_id in request.service_ids.service_ids
+    ]
+    dump_params = dict(
+        include_endpoint_ids=request.include_endpoint_ids,
+        include_constraints =request.include_constraints,
+        include_config_rules=request.include_config_rules,
+    )
+    def callback(session : Session) -> List[Dict]:
+        query = session.query(ServiceModel)
+        if request.include_endpoint_ids: query = query.options(selectinload(ServiceModel.service_endpoints))
+        if request.include_constraints : query = query.options(selectinload(ServiceModel.constraints))
+        if request.include_config_rules: query = query.options(selectinload(ServiceModel.config_rules))
+        obj_list : List[ServiceModel] = query.filter(ServiceModel.service_uuid.in_(service_uuids)).all()
+        return [obj.dump(**dump_params) for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
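service_unset is the subtractive counterpart of service_set: the request enumerates endpoints, constraints, and config rules to remove from the service, and the boolean result drives the UPDATE event in UnsetService. A hedged client-side sketch, with the stub and channel as in the SelectDevice example and placeholder identifiers ('admin' being the usual default context name):

```python
# Sketch: detach one endpoint from an existing service via the new UnsetService
# RPC; identifiers are placeholders, stub/channel as in the SelectDevice sketch.
from common.proto.context_pb2 import Service

request = Service()
request.service_id.context_id.context_uuid.uuid = 'admin'
request.service_id.service_uuid.uuid            = '<service-uuid>'
endpoint_id = request.service_endpoint_ids.add()
endpoint_id.device_id.device_uuid.uuid = '<device-uuid>'
endpoint_id.endpoint_uuid.uuid         = '<endpoint-uuid>'

service_id = stub.UnsetService(request)  # ServiceId; UPDATE event emitted only if rows changed
```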
diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py
index 1d6781d53f7c85d8cb878b1b38b0de65b4ef5726..abd140024f2a13289c7af6a3bafe363a8247e053 100644
--- a/src/context/service/database/Slice.py
+++ b/src/context/service/database/Slice.py
@@ -16,10 +16,10 @@ import datetime, logging
 from sqlalchemy import and_
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
-from common.proto.context_pb2 import ContextId, Slice, SliceId
+from common.proto.context_pb2 import ContextId, Slice, SliceFilter, SliceId
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Slice import json_slice_id
@@ -44,14 +44,26 @@ def slice_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
 def slice_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[SliceModel] = session.query(SliceModel).filter_by(context_uuid=context_uuid).all()
+        obj_list : List[SliceModel] = session.query(SliceModel)\
+            .options(selectinload(SliceModel.slice_endpoints))\
+            .options(selectinload(SliceModel.slice_services))\
+            .options(selectinload(SliceModel.slice_subslices))\
+            .options(selectinload(SliceModel.constraints))\
+            .options(selectinload(SliceModel.config_rules))\
+            .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def slice_get(db_engine : Engine, request : SliceId) -> Dict:
     _,slice_uuid = slice_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[SliceModel] = session.query(SliceModel).filter_by(slice_uuid=slice_uuid).one_or_none()
+        obj : Optional[SliceModel] = session.query(SliceModel)\
+            .options(selectinload(SliceModel.slice_endpoints))\
+            .options(selectinload(SliceModel.slice_services))\
+            .options(selectinload(SliceModel.slice_subslices))\
+            .options(selectinload(SliceModel.constraints))\
+            .options(selectinload(SliceModel.config_rules))\
+            .filter_by(slice_uuid=slice_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -91,6 +103,7 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
         slice_endpoints_data.append({
             'slice_uuid'   : slice_uuid,
             'endpoint_uuid': endpoint_uuid,
+            'position'     : i,
         })
 
     slice_services_data : List[Dict] = list()
@@ -239,3 +252,26 @@ def slice_delete(db_engine : Engine, request : SliceId) -> Tuple[Dict, bool]:
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_slice_id(slice_uuid, json_context_id(context_uuid)),deleted
+
+def slice_select(db_engine : Engine, request : SliceFilter) -> List[Dict]:
+    slice_uuids = [
+        slice_get_uuid(slice_id, allow_random=False)[1]
+        for slice_id in request.slice_ids.slice_ids
+    ]
+    dump_params = dict(
+        include_endpoint_ids=request.include_endpoint_ids,
+        include_constraints =request.include_constraints,
+        include_service_ids =request.include_service_ids,
+        include_subslice_ids=request.include_subslice_ids,
+        include_config_rules=request.include_config_rules,
+    )
+    def callback(session : Session) -> List[Dict]:
+        query = session.query(SliceModel)
+        if request.include_endpoint_ids: query = query.options(selectinload(SliceModel.slice_endpoints))
+        if request.include_service_ids : query = query.options(selectinload(SliceModel.slice_services))
+        if request.include_subslice_ids: query = query.options(selectinload(SliceModel.slice_subslices))
+        if request.include_constraints : query = query.options(selectinload(SliceModel.constraints))
+        if request.include_config_rules: query = query.options(selectinload(SliceModel.config_rules))
+        obj_list : List[SliceModel] = query.filter(SliceModel.slice_uuid.in_(slice_uuids)).all()
+        return [obj.dump(**dump_params) for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Topology.py b/src/context/service/database/Topology.py
index e2c6e2e996ac9321d0d8b9ae2ecea018b650632f..4440299b63f68613854e79998270872389d385cb 100644
--- a/src/context/service/database/Topology.py
+++ b/src/context/service/database/Topology.py
@@ -15,14 +15,16 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
 from common.proto.context_pb2 import ContextId, Topology, TopologyId
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology_id
-from .models.TopologyModel import TopologyModel
+from .models.DeviceModel import DeviceModel
+from .models.LinkModel import LinkModel
+from .models.TopologyModel import TopologyDeviceModel, TopologyLinkModel, TopologyModel
 from .uuids.Context import context_get_uuid
 from .uuids.Topology import topology_get_uuid
 
@@ -38,7 +40,10 @@ def topology_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
 def topology_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all()
+        obj_list : List[TopologyModel] = session.query(TopologyModel)\
+            .options(selectinload(TopologyModel.topology_devices))\
+            .options(selectinload(TopologyModel.topology_links))\
+            .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
@@ -46,6 +51,8 @@ def topology_get(db_engine : Engine, request : TopologyId) -> Dict:
     _,topology_uuid = topology_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[TopologyModel] = session.query(TopologyModel)\
+            .options(selectinload(TopologyModel.topology_devices))\
+            .options(selectinload(TopologyModel.topology_links))\
             .filter_by(topology_uuid=topology_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
@@ -62,7 +69,10 @@ def topology_get_details(db_engine : Engine, request : TopologyId) -> Dict:
     _,topology_uuid = topology_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[TopologyModel] = session.query(TopologyModel)\
+            .options(selectinload(TopologyModel.topology_devices, TopologyDeviceModel.device, DeviceModel.endpoints))\
+            .options(selectinload(TopologyModel.topology_links, TopologyLinkModel.link, LinkModel.link_endpoints))\
             .filter_by(topology_uuid=topology_uuid).one_or_none()
+            #.options(selectinload(DeviceModel.components))\
         return None if obj is None else obj.dump_details()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
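topology_get_details prefetches two- and three-level paths in a single option: the multi-argument selectinload used above is the path form, equivalent to chaining the loader explicitly, which reads more clearly as the path grows. A fragment meant to live inside topology_get_details' callback:

```python
# Equivalent explicit chaining of the multi-argument selectinload paths above;
# each level is loaded with its own batched SELECT ... IN query.
obj = session.query(TopologyModel)\
    .options(
        selectinload(TopologyModel.topology_devices)
            .selectinload(TopologyDeviceModel.device)
            .selectinload(DeviceModel.endpoints))\
    .options(
        selectinload(TopologyModel.topology_links)
            .selectinload(TopologyLinkModel.link)
            .selectinload(LinkModel.link_endpoints))\
    .filter_by(topology_uuid=topology_uuid).one_or_none()
```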
diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py
index d7bb97cd0fec1037e98c8713b885b2d5141cae63..5d14b62a83b71d7a146f74e649435ed941dad6d3 100644
--- a/src/context/service/database/models/ConfigRuleModel.py
+++ b/src/context/service/database/models/ConfigRuleModel.py
@@ -24,13 +24,11 @@ class ConfigRuleKindEnum(enum.Enum):
     CUSTOM = 'custom'
     ACL    = 'acl'
 
-class ConfigRuleModel(_Base):
-    __tablename__ = 'configrule'
+class DeviceConfigRuleModel(_Base):
+    __tablename__ = 'device_configrule'
 
     configrule_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    device_uuid     = Column(ForeignKey('device.device_uuid',   ondelete='CASCADE'), nullable=True, index=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True, index=True)
-    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True, index=True)
+    device_uuid     = Column(ForeignKey('device.device_uuid', ondelete='CASCADE'), nullable=False) #, index=True
     position        = Column(Integer, nullable=False)
     kind            = Column(Enum(ConfigRuleKindEnum), nullable=False)
     action          = Column(Enum(ORM_ConfigActionEnum), nullable=False)
@@ -41,7 +39,51 @@ class ConfigRuleModel(_Base):
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
         #UniqueConstraint('device_uuid',  'position', name='unique_per_device' ),
+    )
+
+    def dump(self) -> Dict:
+        return {
+            'action': self.action.value,
+            self.kind.value: json.loads(self.data),
+        }
+
+class ServiceConfigRuleModel(_Base):
+    __tablename__ = 'service_configrule'
+
+    configrule_uuid = Column(UUID(as_uuid=False), primary_key=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=False) #, index=True
+    position        = Column(Integer, nullable=False)
+    kind            = Column(Enum(ConfigRuleKindEnum), nullable=False)
+    action          = Column(Enum(ORM_ConfigActionEnum), nullable=False)
+    data            = Column(String, nullable=False)
+    created_at      = Column(DateTime, nullable=False)
+    updated_at      = Column(DateTime, nullable=False)
+
+    __table_args__ = (
+        CheckConstraint(position >= 0, name='check_position_value'),
         #UniqueConstraint('service_uuid', 'position', name='unique_per_service'),
+    )
+
+    def dump(self) -> Dict:
+        return {
+            'action': self.action.value,
+            self.kind.value: json.loads(self.data),
+        }
+
+class SliceConfigRuleModel(_Base):
+    __tablename__ = 'slice_configrule'
+
+    configrule_uuid = Column(UUID(as_uuid=False), primary_key=True)
+    slice_uuid      = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), nullable=False) #, index=True
+    position        = Column(Integer, nullable=False)
+    kind            = Column(Enum(ConfigRuleKindEnum), nullable=False)
+    action          = Column(Enum(ORM_ConfigActionEnum), nullable=False)
+    data            = Column(String, nullable=False)
+    created_at      = Column(DateTime, nullable=False)
+    updated_at      = Column(DateTime, nullable=False)
+
+    __table_args__ = (
+        CheckConstraint(position >= 0, name='check_position_value'),
         #UniqueConstraint('slice_uuid',   'position', name='unique_per_slice'  ),
     )
 
diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py
index 156e33c6bb32e237af241035f1d9672b0b419222..f71d4177893d146af2f413781b51930c9909d827 100644
--- a/src/context/service/database/models/ConnectionModel.py
+++ b/src/context/service/database/models/ConnectionModel.py
@@ -59,8 +59,8 @@ class ConnectionEndPointModel(_Base):
     endpoint_uuid   = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position        = Column(Integer, nullable=False)
 
-    connection = relationship('ConnectionModel', back_populates='connection_endpoints', lazy='joined')
-    endpoint   = relationship('EndPointModel',   lazy='joined') # back_populates='connection_endpoints'
+    connection = relationship('ConnectionModel', back_populates='connection_endpoints') #, lazy='joined'
+    endpoint   = relationship('EndPointModel',   lazy='selectin') # back_populates='connection_endpoints'
 
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
@@ -72,5 +72,5 @@ class ConnectionSubServiceModel(_Base):
     connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
     subservice_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
-    connection = relationship('ConnectionModel', back_populates='connection_subservices', lazy='joined')
-    subservice = relationship('ServiceModel',    lazy='joined') # back_populates='connection_subservices'
+    connection = relationship('ConnectionModel', back_populates='connection_subservices') #, lazy='joined'
+    subservice = relationship('ServiceModel',    lazy='selectin') # back_populates='connection_subservices'
diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py
index 2412080c1a2883e7bed85e6e22f389270b3f73bc..cbbe0b5d7280a6f14d645b66abd4df444abb41aa 100644
--- a/src/context/service/database/models/ConstraintModel.py
+++ b/src/context/service/database/models/ConstraintModel.py
@@ -31,12 +31,11 @@ class ConstraintKindEnum(enum.Enum):
     SLA_AVAILABILITY  = 'sla_availability'
     SLA_ISOLATION     = 'sla_isolation'
 
-class ConstraintModel(_Base):
-    __tablename__ = 'constraint'
+class ServiceConstraintModel(_Base):
+    __tablename__ = 'service_constraint'
 
     constraint_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True, index=True)
-    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True, index=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=False) #, index=True
     position        = Column(Integer, nullable=False)
     kind            = Column(Enum(ConstraintKindEnum), nullable=False)
     data            = Column(String, nullable=False)
@@ -46,6 +45,24 @@ class ConstraintModel(_Base):
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
         #UniqueConstraint('service_uuid', 'position', name='unique_per_service'),
+    )
+
+    def dump(self) -> Dict:
+        return {self.kind.value: json.loads(self.data)}
+
+class SliceConstraintModel(_Base):
+    __tablename__ = 'slice_constraint'
+
+    constraint_uuid = Column(UUID(as_uuid=False), primary_key=True)
+    slice_uuid      = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), nullable=False) #, index=True
+    position        = Column(Integer, nullable=False)
+    kind            = Column(Enum(ConstraintKindEnum), nullable=False)
+    data            = Column(String, nullable=False)
+    created_at      = Column(DateTime, nullable=False)
+    updated_at      = Column(DateTime, nullable=False)
+
+    __table_args__ = (
+        CheckConstraint(position >= 0, name='check_position_value'),
         #UniqueConstraint('slice_uuid',   'position', name='unique_per_slice'  ),
     )
 
diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py
index 2124386d16e2e33aec58f5b39bf0f89e3c6589f1..beb500d601aa725c5c0d3c01633aebf31aa23e5b 100644
--- a/src/context/service/database/models/DeviceModel.py
+++ b/src/context/service/database/models/DeviceModel.py
@@ -16,7 +16,7 @@ import operator
 from sqlalchemy import Column, DateTime, Enum, String
 from sqlalchemy.dialects.postgresql import ARRAY, UUID
 from sqlalchemy.orm import relationship
-from typing import Dict
+from typing import Dict, List
 from .enums.DeviceDriver import ORM_DeviceDriverEnum
 from .enums.DeviceOperationalStatus import ORM_DeviceOperationalStatusEnum
 from ._Base import _Base
@@ -33,25 +33,35 @@ class DeviceModel(_Base):
     updated_at                = Column(DateTime, nullable=False)
 
     #topology_devices = relationship('TopologyDeviceModel', back_populates='device')
-    config_rules = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='device'
+    config_rules = relationship('DeviceConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='device'
     endpoints    = relationship('EndPointModel', passive_deletes=True) # lazy='joined', back_populates='device'
 
     def dump_id(self) -> Dict:
         return {'device_uuid': {'uuid': self.device_uuid}}
 
-    def dump(self) -> Dict:
-        return {
+    def dump_endpoints(self) -> List[Dict]:
+        return [endpoint.dump() for endpoint in self.endpoints]
+
+    def dump_config_rules(self) -> Dict:
+        return {'config_rules': [
+            config_rule.dump()
+            for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+        ]}
+
+    def dump_components(self) -> List[Dict]:
+        return []
+
+    def dump(self,
+        include_endpoints : bool = True, include_config_rules : bool = True, include_components : bool = True,
+    ) -> Dict:
+        result = {
             'device_id'                : self.dump_id(),
             'name'                     : self.device_name,
             'device_type'              : self.device_type,
             'device_operational_status': self.device_operational_status.value,
             'device_drivers'           : [driver.value for driver in self.device_drivers],
-            'device_config'            : {'config_rules': [
-                config_rule.dump()
-                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
-            ]},
-            'device_endpoints'         : [
-                endpoint.dump()
-                for endpoint in self.endpoints
-            ],
         }
+        if include_endpoints: result['device_endpoints'] = self.dump_endpoints()
+        if include_config_rules: result['device_config'] = self.dump_config_rules()
+        if include_components: result['components'] = self.dump_components()
+        return result
diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py
index 12ba7e10e7c3d5789f9bf16ad7b4f50c35a36bf5..a079f9900e39fdf3a4329e604f4e596e7f5d1f89 100644
--- a/src/context/service/database/models/EndPointModel.py
+++ b/src/context/service/database/models/EndPointModel.py
@@ -31,8 +31,8 @@ class EndPointModel(_Base):
     created_at       = Column(DateTime, nullable=False)
     updated_at       = Column(DateTime, nullable=False)
 
-    device            = relationship('DeviceModel',          back_populates='endpoints')
-    topology          = relationship('TopologyModel')
+    device            = relationship('DeviceModel',          back_populates='endpoints') # lazy='selectin'
+    topology          = relationship('TopologyModel', lazy='selectin')
     #link_endpoints    = relationship('LinkEndPointModel',    back_populates='endpoint' )
     #service_endpoints = relationship('ServiceEndPointModel', back_populates='endpoint' )
 
diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py
index ee591f5c8404cd7f0f6c97651b5f731a51c43303..9c16da3c9146f28352e8b4f7a6f9ab85f870c8b7 100644
--- a/src/context/service/database/models/LinkModel.py
+++ b/src/context/service/database/models/LinkModel.py
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from sqlalchemy import Column, DateTime, ForeignKey, String
+import operator
+from sqlalchemy import CheckConstraint, Column, DateTime, ForeignKey, Integer, String
 from sqlalchemy.dialects.postgresql import UUID
 from sqlalchemy.orm import relationship
 from typing import Dict
@@ -38,7 +39,7 @@ class LinkModel(_Base):
             'name'             : self.link_name,
             'link_endpoint_ids': [
                 link_endpoint.endpoint.dump_id()
-                for link_endpoint in self.link_endpoints
+                for link_endpoint in sorted(self.link_endpoints, key=operator.attrgetter('position'))
             ],
         }
 
@@ -47,6 +48,11 @@ class LinkEndPointModel(_Base):
 
     link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True)
     endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
+    position      = Column(Integer, nullable=False)
 
-    link     = relationship('LinkModel',     back_populates='link_endpoints', lazy='joined')
-    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints'
+    link     = relationship('LinkModel',     back_populates='link_endpoints') #, lazy='selectin'
+    endpoint = relationship('EndPointModel', lazy='selectin') # back_populates='link_endpoints'
+
+    __table_args__ = (
+        CheckConstraint(position >= 0, name='check_position_value'),
+    )
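
The new `position` column plus the `check_position_value` constraint makes endpoint ordering explicit and validated at the database level. A self-contained sketch of the same pattern (the `Item` table is illustrative, not part of this PR):

```python
import operator
from sqlalchemy import CheckConstraint, Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Item(Base):
    __tablename__ = 'item'
    uuid     = Column(String, primary_key=True)
    position = Column(Integer, nullable=False)
    __table_args__ = (
        # Same idea as check_position_value above: positions must be non-negative.
        CheckConstraint(position >= 0, name='check_position_value'),
    )

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([Item(uuid='b', position=1), Item(uuid='a', position=0)])
    session.commit()
    # Consumers sort on position instead of relying on insertion order:
    ordered = sorted(session.query(Item).all(), key=operator.attrgetter('position'))
    assert [item.uuid for item in ordered] == ['a', 'b']
```
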
diff --git a/src/context/service/database/models/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py
index 2f0c8a326a57a05ab1fd623a968dea0bc39d9e76..32364e289cf68fe760c60eb27cde933f7cf448a4 100644
--- a/src/context/service/database/models/PolicyRuleModel.py
+++ b/src/context/service/database/models/PolicyRuleModel.py
@@ -64,7 +64,7 @@ class PolicyRuleModel(_Base):
             'deviceList': [{'device_uuid': {'uuid': pr_d.device_uuid}} for pr_d in self.policyrule_devices],
         }
         if self.policyrule_kind == PolicyRuleKindEnum.SERVICE:
-            result['serviceId'] = self.policyrule_service.dump_id(),
+            result['serviceId'] = self.policyrule_service.dump_id()
         return {self.policyrule_kind.value: result}
 
 class PolicyRuleDeviceModel(_Base):
@@ -74,4 +74,4 @@ class PolicyRuleDeviceModel(_Base):
     device_uuid     = Column(ForeignKey('device.device_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     #policyrule = relationship('PolicyRuleModel', lazy='joined') # back_populates='policyrule_devices'
-    device     = relationship('DeviceModel',     lazy='joined') # back_populates='policyrule_devices'
+    device     = relationship('DeviceModel',     lazy='selectin') # back_populates='policyrule_devices'
diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py
index 09ff381b5eb374ea752590bba5403fe816319036..2895a7ce9ddfeefd025a9397040a01423c9682a4 100644
--- a/src/context/service/database/models/ServiceModel.py
+++ b/src/context/service/database/models/ServiceModel.py
@@ -13,10 +13,10 @@
 # limitations under the License.
 
 import operator
-from sqlalchemy import Column, DateTime, Enum, ForeignKey, String
+from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String
 from sqlalchemy.dialects.postgresql import UUID
 from sqlalchemy.orm import relationship
-from typing import Dict
+from typing import Dict, List
 from .enums.ServiceStatus import ORM_ServiceStatusEnum
 from .enums.ServiceType import ORM_ServiceTypeEnum
 from ._Base import _Base
@@ -32,10 +32,10 @@ class ServiceModel(_Base):
     created_at     = Column(DateTime, nullable=False)
     updated_at     = Column(DateTime, nullable=False)
 
-    context           = relationship('ContextModel', back_populates='services')
-    service_endpoints = relationship('ServiceEndPointModel') # lazy='joined', back_populates='service'
-    constraints       = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='service'
-    config_rules      = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='service'
+    context           = relationship('ContextModel', back_populates='services', lazy='selectin')
+    service_endpoints = relationship('ServiceEndPointModel') # lazy='selectin', back_populates='service'
+    constraints       = relationship('ServiceConstraintModel', passive_deletes=True) # lazy='selectin', back_populates='service'
+    config_rules      = relationship('ServiceConfigRuleModel', passive_deletes=True) # lazy='selectin', back_populates='service'
 
     def dump_id(self) -> Dict:
         return {
@@ -43,31 +43,48 @@ class ServiceModel(_Base):
             'service_uuid': {'uuid': self.service_uuid},
         }
 
-    def dump(self) -> Dict:
-        return {
-            'service_id'          : self.dump_id(),
-            'name'                : self.service_name,
-            'service_type'        : self.service_type.value,
-            'service_status'      : {'service_status': self.service_status.value},
-            'service_endpoint_ids': [
-                service_endpoint.endpoint.dump_id()
-                for service_endpoint in self.service_endpoints
-            ],
-            'service_constraints' : [
-                constraint.dump()
-                for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
-            ],
-            'service_config'      : {'config_rules': [
-                config_rule.dump()
-                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
-            ]},
+    def dump_endpoint_ids(self) -> List[Dict]:
+        return [
+            service_endpoint.endpoint.dump_id()
+            for service_endpoint in sorted(self.service_endpoints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_constraints(self) -> List[Dict]:
+        return [
+            constraint.dump()
+            for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_config_rules(self) -> Dict:
+        return {'config_rules': [
+            config_rule.dump()
+            for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+        ]}
+
+    def dump(
+        self, include_endpoint_ids : bool = True, include_constraints : bool = True, include_config_rules : bool = True
+    ) -> Dict:
+        result = {
+            'service_id'    : self.dump_id(),
+            'name'          : self.service_name,
+            'service_type'  : self.service_type.value,
+            'service_status': {'service_status': self.service_status.value},
         }
+        if include_endpoint_ids: result['service_endpoint_ids'] = self.dump_endpoint_ids()
+        if include_constraints: result['service_constraints'] = self.dump_constraints()
+        if include_config_rules: result['service_config'] = self.dump_config_rules()
+        return result
 
 class ServiceEndPointModel(_Base):
     __tablename__ = 'service_endpoint'
 
     service_uuid  = Column(ForeignKey('service.service_uuid',   ondelete='CASCADE' ), primary_key=True)
     endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
+    position      = Column(Integer, nullable=False)
+
+    service  = relationship('ServiceModel',  back_populates='service_endpoints') # lazy='selectin'
+    endpoint = relationship('EndPointModel', lazy='selectin') # back_populates='service_endpoints'
 
-    service  = relationship('ServiceModel',  back_populates='service_endpoints', lazy='joined')
-    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints'
+    __table_args__ = (
+        CheckConstraint(position >= 0, name='check_position_value'),
+    )
diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py
index 2d6c884169154fee8d44c26464416c6708c650b1..d3befa66b4c7ecba4a28fefa745a7c2214f37caf 100644
--- a/src/context/service/database/models/SliceModel.py
+++ b/src/context/service/database/models/SliceModel.py
@@ -13,10 +13,10 @@
 # limitations under the License.
 
 import operator
-from sqlalchemy import Column, DateTime, Enum, ForeignKey, String
+from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String
 from sqlalchemy.dialects.postgresql import UUID
 from sqlalchemy.orm import relationship
-from typing import Dict
+from typing import Dict, List
 from .enums.SliceStatus import ORM_SliceStatusEnum
 from ._Base import _Base
 
@@ -32,13 +32,13 @@ class SliceModel(_Base):
     created_at         = Column(DateTime, nullable=False)
     updated_at         = Column(DateTime, nullable=False)
 
-    context         = relationship('ContextModel', back_populates='slices')
-    slice_endpoints = relationship('SliceEndPointModel') # lazy='joined', back_populates='slice'
-    slice_services  = relationship('SliceServiceModel') # lazy='joined', back_populates='slice'
+    context         = relationship('ContextModel', back_populates='slices', lazy='selectin')
+    slice_endpoints = relationship('SliceEndPointModel') # lazy='selectin', back_populates='slice'
+    slice_services  = relationship('SliceServiceModel') # lazy='selectin', back_populates='slice'
     slice_subslices = relationship(
         'SliceSubSliceModel', primaryjoin='slice.c.slice_uuid == slice_subslice.c.slice_uuid')
-    constraints     = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='slice'
-    config_rules    = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='slice'
+    constraints     = relationship('SliceConstraintModel', passive_deletes=True) # lazy='selectin', back_populates='slice'
+    config_rules    = relationship('SliceConfigRuleModel', passive_deletes=True) # lazy='selectin', back_populates='slice'
 
     def dump_id(self) -> Dict:
         return {
@@ -46,45 +46,72 @@ class SliceModel(_Base):
             'slice_uuid': {'uuid': self.slice_uuid},
         }
 
-    def dump(self) -> Dict:
+    def dump_endpoint_ids(self) -> List[Dict]:
+        return [
+            slice_endpoint.endpoint.dump_id()
+            for slice_endpoint in sorted(self.slice_endpoints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_constraints(self) -> List[Dict]:
+        return [
+            constraint.dump()
+            for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_config_rules(self) -> Dict:
+        return {'config_rules': [
+            config_rule.dump()
+            for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+        ]}
+
+    def dump_service_ids(self) -> List[Dict]:
+        return [
+            slice_service.service.dump_id()
+            for slice_service in self.slice_services
+        ]
+
+    def dump_subslice_ids(self) -> List[Dict]:
+        return [
+            slice_subslice.subslice.dump_id()
+            for slice_subslice in self.slice_subslices
+        ]
+
+    def dump_owner_id(self) -> Dict:
         return {
-            'slice_id'          : self.dump_id(),
-            'name'              : self.slice_name,
-            'slice_status'      : {'slice_status': self.slice_status.value},
-            'slice_endpoint_ids': [
-                slice_endpoint.endpoint.dump_id()
-                for slice_endpoint in self.slice_endpoints
-            ],
-            'slice_constraints' : [
-                constraint.dump()
-                for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
-            ],
-            'slice_config'      : {'config_rules': [
-                config_rule.dump()
-                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
-            ]},
-            'slice_service_ids': [
-                slice_service.service.dump_id()
-                for slice_service in self.slice_services
-            ],
-            'slice_subslice_ids': [
-                slice_subslice.subslice.dump_id()
-                for slice_subslice in self.slice_subslices
-            ],
-            'slice_owner': {
-                'owner_uuid': {'uuid': self.slice_owner_uuid},
-                'owner_string': self.slice_owner_string
-            }
+            'owner_uuid': {'uuid': self.slice_owner_uuid},
+            'owner_string': self.slice_owner_string
+        }
+
+    def dump(
+        self, include_endpoint_ids : bool = True, include_constraints : bool = True, include_service_ids : bool = True,
+        include_subslice_ids : bool = True, include_config_rules : bool = True
+    ) -> Dict:
+        result = {
+            'slice_id'    : self.dump_id(),
+            'name'        : self.slice_name,
+            'slice_status': {'slice_status': self.slice_status.value},
+            'slice_owner' : self.dump_owner_id()
         }
+        if include_endpoint_ids: result['slice_endpoint_ids'] = self.dump_endpoint_ids()
+        if include_constraints : result['slice_constraints' ] = self.dump_constraints()
+        if include_service_ids : result['slice_service_ids' ] = self.dump_service_ids()
+        if include_subslice_ids: result['slice_subslice_ids'] = self.dump_subslice_ids()
+        if include_config_rules: result['slice_config'      ] = self.dump_config_rules()
+        return result
 
 class SliceEndPointModel(_Base):
     __tablename__ = 'slice_endpoint'
 
     slice_uuid    = Column(ForeignKey('slice.slice_uuid',       ondelete='CASCADE' ), primary_key=True)
     endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
+    position      = Column(Integer, nullable=False)
+
+    slice    = relationship('SliceModel', back_populates='slice_endpoints') #, lazy='selectin'
+    endpoint = relationship('EndPointModel', lazy='selectin') # back_populates='slice_endpoints'
 
-    slice    = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined')
-    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints'
+    __table_args__ = (
+        CheckConstraint(position >= 0, name='check_position_value'),
+    )
 
 class SliceServiceModel(_Base):
     __tablename__ = 'slice_service'
@@ -92,8 +119,8 @@ class SliceServiceModel(_Base):
     slice_uuid   = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE' ), primary_key=True)
     service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
-    slice   = relationship('SliceModel', back_populates='slice_services', lazy='joined')
-    service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services'
+    slice   = relationship('SliceModel', back_populates='slice_services') # , lazy='selectin'
+    service = relationship('ServiceModel', lazy='selectin') # back_populates='slice_services'
 
 class SliceSubSliceModel(_Base):
     __tablename__ = 'slice_subslice'
@@ -102,5 +129,5 @@ class SliceSubSliceModel(_Base):
     subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True)
 
     slice    = relationship(
-        'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined')
-    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='joined')
+        'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices') #, lazy='selectin'
+    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='selectin')
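
This PR systematically replaces `lazy='joined'` with `lazy='selectin'` on collection-bearing relationships. Joined loading LEFT OUTER JOINs the child table into the parent query and multiplies parent rows; selectin loading issues one extra `SELECT ... WHERE key IN (...)` per relationship, which is usually cheaper for collections. A runnable illustration with toy Parent/Child tables (not part of the PR):

```python
from sqlalchemy import Column, ForeignKey, String, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

class Parent(Base):
    __tablename__ = 'parent'
    uuid     = Column(String, primary_key=True)
    children = relationship('Child', lazy='selectin')

class Child(Base):
    __tablename__ = 'child'
    uuid        = Column(String, primary_key=True)
    parent_uuid = Column(ForeignKey('parent.uuid'))

engine = create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Parent(uuid='p1', children=[Child(uuid='c1'), Child(uuid='c2')]))
    session.commit()
    # With echo=True, the log shows two statements: the parent SELECT, then
    # "SELECT ... FROM child WHERE child.parent_uuid IN (?)" for the children.
    parents = session.query(Parent).all()
    assert len(parents[0].children) == 2
```
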
diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py
index 7dc2333f0a9b979f251c173d850a235dcb822d91..0ed4a038bcf4426f4cf112bd03c5cb36cb42c822 100644
--- a/src/context/service/database/models/TopologyModel.py
+++ b/src/context/service/database/models/TopologyModel.py
@@ -27,7 +27,7 @@ class TopologyModel(_Base):
     created_at    = Column(DateTime, nullable=False)
     updated_at    = Column(DateTime, nullable=False)
 
-    context          = relationship('ContextModel', back_populates='topologies')
+    context          = relationship('ContextModel', back_populates='topologies', lazy='selectin')
     topology_devices = relationship('TopologyDeviceModel') # back_populates='topology'
     topology_links   = relationship('TopologyLinkModel'  ) # back_populates='topology'
 
@@ -46,11 +46,19 @@ class TopologyModel(_Base):
         }
 
     def dump_details(self) -> Dict:
+        devices = [
+            td.device.dump(include_config_rules=False, include_components=False)
+            for td in self.topology_devices
+        ]
+        links = [
+            tl.link.dump()
+            for tl in self.topology_links
+        ]
         return {
             'topology_id': self.dump_id(),
             'name'       : self.topology_name,
-            'devices'    : [td.device.dump() for td in self.topology_devices],
-            'links'      : [tl.link.dump()   for tl in self.topology_links  ],
+            'devices'    : devices,
+            'links'      : links,
         }
 
 class TopologyDeviceModel(_Base):
@@ -59,8 +67,8 @@ class TopologyDeviceModel(_Base):
     topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True, index=True)
 
-    #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices'
-    device   = relationship('DeviceModel',   lazy='joined') # back_populates='topology_devices'
+    #topology = relationship('TopologyModel', lazy='selectin') # back_populates='topology_devices'
+    device   = relationship('DeviceModel',   lazy='selectin') # back_populates='topology_devices'
 
 class TopologyLinkModel(_Base):
     __tablename__ = 'topology_link'
@@ -68,5 +76,5 @@ class TopologyLinkModel(_Base):
     topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True, index=True)
 
-    #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links'
-    link     = relationship('LinkModel',     lazy='joined') # back_populates='topology_links'
+    #topology = relationship('TopologyModel', lazy='selectin') # back_populates='topology_links'
+    link     = relationship('LinkModel',     lazy='selectin') # back_populates='topology_links'
diff --git a/src/context/service/database/models/_Base.py b/src/context/service/database/models/_Base.py
index a10de60eb8731132ec815de1ff897c06ac12b665..b87b9b06d6adc5825ab5dd84cf64347eb9c26f66 100644
--- a/src/context/service/database/models/_Base.py
+++ b/src/context/service/database/models/_Base.py
@@ -30,23 +30,23 @@ def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None:
         return text(INDEX_STORING.format(index_name, table_name, str_index_fields, str_storing_fields))
 
     statements = [
-        index_storing('configrule_device_uuid_rec_idx', 'configrule', ['device_uuid'], [
-            'service_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        index_storing('device_configrule_device_uuid_rec_idx', 'device_configrule', ['device_uuid'], [
+            'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
         ]),
-        index_storing('configrule_service_uuid_rec_idx', 'configrule', ['service_uuid'], [
-            'device_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        index_storing('service_configrule_service_uuid_rec_idx', 'service_configrule', ['service_uuid'], [
+            'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
         ]),
-        index_storing('configrule_slice_uuid_rec_idx', 'configrule', ['slice_uuid'], [
-            'device_uuid', 'service_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        index_storing('slice_configrule_slice_uuid_rec_idx', 'slice_configrule', ['slice_uuid'], [
+            'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
         ]),
         index_storing('connection_service_uuid_rec_idx', 'connection', ['service_uuid'], [
             'settings', 'created_at', 'updated_at'
         ]),
-        index_storing('constraint_service_uuid_rec_idx', 'constraint', ['service_uuid'], [
-            'slice_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at'
+        index_storing('service_constraint_service_uuid_rec_idx', 'service_constraint', ['service_uuid'], [
+            'position', 'kind', 'data', 'created_at', 'updated_at'
         ]),
-        index_storing('constraint_slice_uuid_rec_idx', 'constraint', ['slice_uuid'], [
-            'service_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at'
+        index_storing('slice_constraint_slice_uuid_rec_idx', 'slice_constraint', ['slice_uuid'], [
+            'position', 'kind', 'data', 'created_at', 'updated_at'
         ]),
         index_storing('endpoint_device_uuid_rec_idx', 'endpoint', ['device_uuid'], [
             'topology_uuid', 'name', 'endpoint_type', 'kpi_sample_types', 'created_at', 'updated_at'
@@ -57,7 +57,6 @@ def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None:
         index_storing('slice_context_uuid_rec_idx', 'slice', ['context_uuid'], [
             'slice_name', 'slice_status', 'slice_owner_uuid', 'slice_owner_string', 'created_at', 'updated_at'
         ]),
-
         index_storing('topology_context_uuid_rec_idx', 'topology', ['context_uuid'], [
             'topology_name', 'created_at', 'updated_at'
         ]),
diff --git a/src/context/service/database/models/enums/DeviceDriver.py b/src/context/service/database/models/enums/DeviceDriver.py
index 6997e7dfbff6bc1d4b6452a28f11cdac9aae412f..a612803e235de2c6d2d8c91052416a675a3a3085 100644
--- a/src/context/service/database/models/enums/DeviceDriver.py
+++ b/src/context/service/database/models/enums/DeviceDriver.py
@@ -24,6 +24,7 @@ class ORM_DeviceDriverEnum(enum.Enum):
     IETF_NETWORK_TOPOLOGY = DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY
     ONF_TR_352            = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352
     XR                    = DeviceDriverEnum.DEVICEDRIVER_XR
+    IETF_L2VPN            = DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN
 
 grpc_to_enum__device_driver = functools.partial(
     grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum)
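
The new IETF_L2VPN member follows the name-based gRPC-to-ORM mapping convention used by `grpc_to_enum`. The stand-in below only illustrates that convention; the real helper is the one imported by the module above, and the numeric enum value here is illustrative:

```python
import enum, functools

class DeviceDriverEnum(enum.IntEnum):           # stand-in for the proto enum
    DEVICEDRIVER_UNDEFINED  = 0
    DEVICEDRIVER_IETF_L2VPN = 9                 # numeric value is illustrative

class ORM_DeviceDriverEnum(enum.Enum):
    UNDEFINED  = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED
    IETF_L2VPN = DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN

def grpc_to_enum(grpc_enum_class, orm_enum_class, grpc_enum_value):
    # Strip the proto prefix and look the ORM member up by name.
    name = grpc_enum_class(grpc_enum_value).name.replace('DEVICEDRIVER_', '')
    return orm_enum_class[name]

grpc_to_enum__device_driver = functools.partial(grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum)
assert grpc_to_enum__device_driver(9) is ORM_DeviceDriverEnum.IETF_L2VPN
```
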
diff --git a/src/device/requirements.in b/src/device/requirements.in
index ec29fc7a30278625e950f3eed608281f8c7c5cb8..50b941160937aa09976dd3dda4afab6c69d309bb 100644
--- a/src/device/requirements.in
+++ b/src/device/requirements.in
@@ -29,6 +29,7 @@ xmltodict==0.12.0
 tabulate
 ipaddress
 macaddress
+websockets==10.4
 
 # pip's dependency resolver does not take into account installed packages.
 # p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install latest one
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
index be40e64ecd25a5c46c23d5ec0a73a2484b65691d..e7fec041802cc661b14617a8ebfec0864c738b39 100644
--- a/src/device/service/DeviceServiceServicerImpl.py
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -12,11 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging
-from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+import grpc, logging, time
+from typing import Dict
+from prometheus_client import Histogram
+from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, safe_and_metered_rpc_method
 from common.method_wrappers.ServiceExceptions import NotFoundException, OperationFailedException
 from common.proto.context_pb2 import (
-    Device, DeviceConfig, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, Empty)
+    Device, DeviceConfig, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, Empty, Link)
 from common.proto.device_pb2 import MonitoringSettings
 from common.proto.device_pb2_grpc import DeviceServiceServicer
 from common.tools.context_queries.Device import get_device
@@ -28,13 +30,17 @@ from .monitoring.MonitoringLoops import MonitoringLoops
 from .ErrorMessages import ERROR_MISSING_DRIVER, ERROR_MISSING_KPI
 from .Tools import (
     check_connect_rules, check_no_endpoints, compute_rules_to_add_delete, configure_rules, deconfigure_rules,
-    populate_config_rules, populate_endpoint_monitoring_resources, populate_endpoints, populate_initial_config_rules,
-    subscribe_kpi, unsubscribe_kpi, update_endpoints)
+    get_device_controller_uuid, populate_config_rules, populate_endpoint_monitoring_resources, populate_endpoints,
+    populate_initial_config_rules, subscribe_kpi, unsubscribe_kpi, update_endpoints)
 
 LOGGER = logging.getLogger(__name__)
 
 METRICS_POOL = MetricsPool('Device', 'RPC')
 
+METRICS_POOL_DETAILS = MetricsPool('Device', 'exec_details', labels={
+    'step_name': '',
+})
+
 class DeviceServiceServicerImpl(DeviceServiceServicer):
     def __init__(self, driver_instance_cache : DriverInstanceCache, monitoring_loops : MonitoringLoops) -> None:
         LOGGER.debug('Creating Servicer...')
@@ -73,9 +79,16 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
 
             errors = []
 
+            # Sub-devices and sub-links are exposed by intermediate controllers or represent mgmt links.
+            # They are used to assist in path computation algorithms, and/or to identify dependencies
+            # (which controller is in charge of which sub-device).
+            new_sub_devices : Dict[str, Device] = dict()
+            new_sub_links : Dict[str, Link] = dict()
+
             if len(device.device_endpoints) == 0:
                 # created from request, populate endpoints using driver
-                errors.extend(populate_endpoints(device, driver, self.monitoring_loops))
+                errors.extend(populate_endpoints(
+                    device, driver, self.monitoring_loops, new_sub_devices, new_sub_links))
 
             if len(device.device_config.config_rules) == len(connection_config_rules):
                 # created from request, populate config rules using driver
@@ -87,34 +100,63 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
                 for error in errors: LOGGER.error(error)
                 raise OperationFailedException('AddDevice', extra_details=errors)
 
+            device.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
             device_id = context_client.SetDevice(device)
 
+            for sub_device in new_sub_devices.values():
+                context_client.SetDevice(sub_device)
+
+            for sub_link in new_sub_links.values():
+                context_client.SetLink(sub_link)
+
             # Update endpoint monitoring resources with UUIDs
-            device_with_uuids = context_client.GetDevice(device_id)
+            device_with_uuids = get_device(
+                context_client, device_id.device_uuid.uuid, rw_copy=False, include_endpoints=True,
+                include_components=False, include_config_rules=False)
             populate_endpoint_monitoring_resources(device_with_uuids, self.monitoring_loops)
 
+            context_client.close()
             return device_id
         finally:
             self.mutex_queues.signal_done(device_uuid)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ConfigureDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId:
+        t0 = time.time()
         device_id = request.device_id
         device_uuid = device_id.device_uuid.uuid
 
         self.mutex_queues.wait_my_turn(device_uuid)
+        t1 = time.time()
         try:
             context_client = ContextClient()
-            device = get_device(context_client, device_uuid, rw_copy=True)
+            t2 = time.time()
+            device = get_device(
+                context_client, device_uuid, rw_copy=True, include_endpoints=False, include_components=False,
+                include_config_rules=True)
             if device is None:
                 raise NotFoundException('Device', device_uuid, extra_details='loading in ConfigureDevice')
 
+            t3 = time.time()
+            device_controller_uuid = get_device_controller_uuid(device)
+            if device_controller_uuid is not None:
+                device = get_device(
+                    context_client, device_controller_uuid, rw_copy=True, include_endpoints=False,
+                    include_components=False, include_config_rules=True)
+                if device is None:
+                    raise NotFoundException(
+                        'Device', device_controller_uuid, extra_details='loading in ConfigureDevice')
+
+            device_uuid = device.device_id.device_uuid.uuid
             driver : _Driver = get_driver(self.driver_instance_cache, device)
             if driver is None:
                 msg = ERROR_MISSING_DRIVER.format(device_uuid=str(device_uuid))
                 raise OperationFailedException('ConfigureDevice', extra_details=msg)
 
             if DeviceDriverEnum.DEVICEDRIVER_P4 in device.device_drivers:
+                device = get_device(
+                    context_client, device_uuid, rw_copy=False, include_endpoints=True, include_components=False,
+                    include_config_rules=True)
                 # P4 Driver, by now, has no means to retrieve endpoints
                 # We allow defining the endpoints manually
                 update_endpoints(request, device)
@@ -126,19 +168,44 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
             if request.device_operational_status != DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED:
                 device.device_operational_status = request.device_operational_status
 
+            t4 = time.time()
             # TODO: use of datastores (might be virtual ones) to enable rollbacks
             resources_to_set, resources_to_delete = compute_rules_to_add_delete(device, request)
 
+            t5 = time.time()
             errors = []
             errors.extend(configure_rules(device, driver, resources_to_set))
+            t6 = time.time()
             errors.extend(deconfigure_rules(device, driver, resources_to_delete))
 
+            t7 = time.time()
             if len(errors) > 0:
                 for error in errors: LOGGER.error(error)
                 raise OperationFailedException('ConfigureDevice', extra_details=errors)
 
+            # Context Performance+Scalability enhancement:
+            # Except for the P4 logic above, this method does not add/update/delete endpoints.
+            # Remove endpoints to reduce number of inserts done by Context.
+            # TODO: Add logic to inspect endpoints and keep only those ones modified with respect to Context.
+            del device.device_endpoints[:]
+
+            t8 = time.time()
             # Note: Rules are updated by configure_rules() and deconfigure_rules() methods.
             device_id = context_client.SetDevice(device)
+
+            t9 = time.time()
+
+            histogram_duration : Histogram = METRICS_POOL_DETAILS.get_or_create(
+                'ConfigureDevice', MetricTypeEnum.HISTOGRAM_DURATION)
+            histogram_duration.labels(step_name='total'            ).observe(t9-t0)
+            histogram_duration.labels(step_name='wait_queue'       ).observe(t1-t0)
+            histogram_duration.labels(step_name='execution'        ).observe(t9-t1)
+            histogram_duration.labels(step_name='get_device'       ).observe(t3-t2)
+            histogram_duration.labels(step_name='split_rules'      ).observe(t5-t4)
+            histogram_duration.labels(step_name='configure_rules'  ).observe(t6-t5)
+            histogram_duration.labels(step_name='deconfigure_rules').observe(t7-t6)
+            histogram_duration.labels(step_name='set_device'       ).observe(t9-t8)
+
             return device_id
         finally:
             self.mutex_queues.signal_done(device_uuid)
@@ -150,7 +217,9 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
             context_client = ContextClient()
-            device = get_device(context_client, device_uuid, rw_copy=False)
+            device = get_device(
+                context_client, device_uuid, rw_copy=False, include_endpoints=False, include_config_rules=False,
+                include_components=False)
             if device is None:
                 raise NotFoundException('Device', device_uuid, extra_details='loading in DeleteDevice')
             device_uuid = device.device_id.device_uuid.uuid
@@ -169,7 +238,9 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
             context_client = ContextClient()
-            device = get_device(context_client, device_uuid, rw_copy=False)
+            device = get_device(
+                context_client, device_uuid, rw_copy=False, include_endpoints=False, include_components=False,
+                include_config_rules=True)
             if device is None:
                 raise NotFoundException('Device', device_uuid, extra_details='loading in DeleteDevice')
 
@@ -208,7 +279,9 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
             context_client = ContextClient()
-            device = get_device(context_client, device_uuid, rw_copy=False)
+            device = get_device(
+                context_client, device_uuid, rw_copy=False, include_endpoints=False, include_components=False,
+                include_config_rules=True)
             if device is None:
                 raise NotFoundException('Device', device_uuid, extra_details='loading in DeleteDevice')
 
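
The timing instrumentation added to ConfigureDevice boils down to one labeled Prometheus histogram observed once per step. A standalone sketch of the pattern using `prometheus_client` directly (`MetricsPool.get_or_create` is TFS-internal plumbing; the metric name below is illustrative):

```python
import time
from prometheus_client import Histogram

CONFIGURE_DEVICE_DURATION = Histogram(
    'device_exec_details_configuredevice_duration',    # illustrative metric name
    'Duration of ConfigureDevice steps', ['step_name'])

def configure_device_timed():
    t0 = time.time()
    # ... wait for the per-device mutex queue ...
    t1 = time.time()
    # ... load device, split/apply/deapply rules, store the result ...
    t9 = time.time()
    # One observation per step; Grafana can then plot per-step latency.
    CONFIGURE_DEVICE_DURATION.labels(step_name='total'     ).observe(t9 - t0)
    CONFIGURE_DEVICE_DURATION.labels(step_name='wait_queue').observe(t1 - t0)
    CONFIGURE_DEVICE_DURATION.labels(step_name='execution' ).observe(t9 - t1)
```
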
diff --git a/src/device/service/ErrorMessages.py b/src/device/service/ErrorMessages.py
index 1fbea721fdc52bdf759581c0525b30b1206ae844..bb7702e4e629bad43df4870d923f0a1829378e2e 100644
--- a/src/device/service/ErrorMessages.py
+++ b/src/device/service/ErrorMessages.py
@@ -14,9 +14,9 @@
 
 _DEVICE_ID          = 'DeviceId({device_uuid:s})'
 _ENDPOINT_ID        = 'EndpointId({endpoint_uuid:s})'
-_ENDPOINT_DATA      = 'EndpointId({endpoint_data:s})'
 _KPI                = 'Kpi({kpi_uuid:s})'
 _DEVICE_ENDPOINT_ID = _DEVICE_ID + '/' + _ENDPOINT_ID
+_RESOURCE           = 'Resource({resource_data:s})'
 _RESOURCE_KEY       = 'Resource(key={resource_key:s})'
 _RESOURCE_KEY_VALUE = 'Resource(key={resource_key:s}, value={resource_value:s})'
 _SUBSCRIPTION       = 'Subscription(key={subscr_key:s}, duration={subscr_duration:s}, interval={subscr_interval:s})'
@@ -26,7 +26,8 @@ _ERROR              = 'Error({error:s})'
 ERROR_MISSING_DRIVER = _DEVICE_ID + ' has not been added to this Device instance'
 ERROR_MISSING_KPI    = _KPI + ' not found'
 
-ERROR_BAD_ENDPOINT   = _DEVICE_ID + ': GetConfig retrieved malformed ' + _ENDPOINT_DATA
+ERROR_BAD_RESOURCE   = _DEVICE_ID + ': GetConfig retrieved malformed ' + _RESOURCE
+ERROR_UNSUP_RESOURCE = _DEVICE_ID + ': GetConfig retrieved unsupported ' + _RESOURCE
 
 ERROR_GET            = _DEVICE_ID + ': Unable to Get ' + _RESOURCE_KEY + '; ' + _ERROR
 ERROR_GET_INIT       = _DEVICE_ID + ': Unable to Get Initial ' + _RESOURCE_KEY + '; ' + _ERROR
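
How the renamed templates compose, as a self-contained check (the fragments are copied from above so the snippet runs on its own; the values are illustrative):

```python
_DEVICE_ID = 'DeviceId({device_uuid:s})'
_RESOURCE  = 'Resource({resource_data:s})'
ERROR_UNSUP_RESOURCE = _DEVICE_ID + ': GetConfig retrieved unsupported ' + _RESOURCE

msg = ERROR_UNSUP_RESOURCE.format(device_uuid='dev-1', resource_data="('/unknown/key', None)")
assert msg == "DeviceId(dev-1): GetConfig retrieved unsupported Resource(('/unknown/key', None))"
```
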
diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py
index 571e8acdab7fc243c22923a69202c89db88c8ce3..cd3af07e3324e50ff43eb5e653c4c46771a5507e 100644
--- a/src/device/service/Tools.py
+++ b/src/device/service/Tools.py
@@ -13,19 +13,20 @@
 # limitations under the License.
 
 import json, logging
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any, Dict, List, Optional, Tuple, Union
 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException
-from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceConfig
+from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceConfig, Link
 from common.proto.device_pb2 import MonitoringSettings
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.tools.grpc.ConfigRules import update_config_rule_custom
 from common.tools.grpc.Tools import grpc_message_to_json
+from context.client.ContextClient import ContextClient
 from .driver_api._Driver import _Driver, RESOURCE_ENDPOINTS
 from .monitoring.MonitoringLoops import MonitoringLoops
 from .ErrorMessages import (
-    ERROR_BAD_ENDPOINT, ERROR_DELETE, ERROR_GET, ERROR_GET_INIT, ERROR_MISSING_KPI, ERROR_SAMPLETYPE, ERROR_SET,
-    ERROR_SUBSCRIBE, ERROR_UNSUBSCRIBE
+    ERROR_BAD_RESOURCE, ERROR_DELETE, ERROR_GET, ERROR_GET_INIT, ERROR_MISSING_KPI, ERROR_SAMPLETYPE, ERROR_SET,
+    ERROR_SUBSCRIBE, ERROR_UNSUBSCRIBE, ERROR_UNSUP_RESOURCE
 )
 
 LOGGER = logging.getLogger(__name__)
@@ -77,19 +78,51 @@ def check_no_endpoints(device_endpoints) -> None:
         extra_details='RPC method AddDevice does not accept Endpoints. Endpoints are discovered through '\
                         'interrogation of the physical device.')
 
-def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : MonitoringLoops) -> List[str]:
+def get_device_controller_uuid(device : Device) -> Optional[str]:
+    for config_rule in device.device_config.config_rules:
+        if config_rule.WhichOneof('config_rule') != 'custom': continue
+        if config_rule.custom.resource_key != '_controller': continue
+        device_controller_id = json.loads(config_rule.custom.resource_value)
+        return device_controller_id['uuid']
+    return None
+
+def populate_endpoints(
+    device : Device, driver : _Driver, monitoring_loops : MonitoringLoops,
+    new_sub_devices : Dict[str, Device], new_sub_links : Dict[str, Link]
+) -> List[str]:
     device_uuid = device.device_id.device_uuid.uuid
+    device_name = device.name
 
     resources_to_get = [RESOURCE_ENDPOINTS]
     results_getconfig = driver.GetConfig(resources_to_get)
+    LOGGER.debug('results_getconfig = {:s}'.format(str(results_getconfig)))
+
+    # first quick pass to identify whether mgmt endpoints and links are needed
+    add_mgmt_port = False
+    for resource_data in results_getconfig:
+        if len(resource_data) != 2: continue
+        resource_key, _ = resource_data
+        if resource_key.startswith('/devices/device'):
+            add_mgmt_port = True
+            break
+
+    if add_mgmt_port:
+        # add mgmt port to main device
+        device_mgmt_endpoint = device.device_endpoints.add()
+        device_mgmt_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
+        device_mgmt_endpoint.endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME
+        device_mgmt_endpoint.endpoint_id.device_id.device_uuid.uuid = device_uuid
+        device_mgmt_endpoint.endpoint_id.endpoint_uuid.uuid = 'mgmt'
+        device_mgmt_endpoint.name = 'mgmt'
+        device_mgmt_endpoint.endpoint_type = 'mgmt'
 
     errors : List[str] = list()
-    for endpoint in results_getconfig:
-        if len(endpoint) != 2:
-            errors.append(ERROR_BAD_ENDPOINT.format(device_uuid=device_uuid, endpoint_data=str(endpoint)))
+    for resource_data in results_getconfig:
+        if len(resource_data) != 2:
+            errors.append(ERROR_BAD_RESOURCE.format(device_uuid=device_uuid, resource_data=str(resource_data)))
             continue
 
-        resource_key, resource_value = endpoint
+        resource_key, resource_value = resource_data
         if isinstance(resource_value, Exception):
             errors.append(ERROR_GET.format(
                 device_uuid=device_uuid, resource_key=str(resource_key), error=str(resource_value)))
@@ -97,19 +130,88 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon
         if resource_value is None:
             continue
 
-        endpoint_uuid = resource_value.get('uuid')
-
-        device_endpoint = device.device_endpoints.add()
-        device_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
-        device_endpoint.endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME
-        device_endpoint.endpoint_id.device_id.device_uuid.uuid = device_uuid
-        device_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid
-        device_endpoint.endpoint_type = resource_value.get('type')
+        if resource_key.startswith('/devices/device'):
+            # create sub-device
+            _sub_device_uuid = resource_value['uuid']
+            _sub_device = Device()
+            _sub_device.device_id.device_uuid.uuid = _sub_device_uuid           # pylint: disable=no-member
+            _sub_device.name = resource_value['name']
+            _sub_device.device_type = resource_value['type']
+            _sub_device.device_operational_status = resource_value['status']
+
+            # Sub-devices should not have a driver assigned. Instead, they should have
+            # a config rule specifying their controller.
+            #_sub_device.device_drivers.extend(resource_value['drivers'])        # pylint: disable=no-member
+            controller_config_rule = _sub_device.device_config.config_rules.add()
+            controller_config_rule.action = ConfigActionEnum.CONFIGACTION_SET
+            controller_config_rule.custom.resource_key = '_controller'
+            controller = {'uuid': device_uuid, 'name': device_name}
+            controller_config_rule.custom.resource_value = json.dumps(controller, indent=0, sort_keys=True)
+
+            new_sub_devices[_sub_device_uuid] = _sub_device
+
+            # add mgmt port to sub-device
+            _sub_device_mgmt_endpoint = _sub_device.device_endpoints.add()      # pylint: disable=no-member
+            _sub_device_mgmt_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
+            _sub_device_mgmt_endpoint.endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME
+            _sub_device_mgmt_endpoint.endpoint_id.device_id.device_uuid.uuid = _sub_device_uuid
+            _sub_device_mgmt_endpoint.endpoint_id.endpoint_uuid.uuid = 'mgmt'
+            _sub_device_mgmt_endpoint.name = 'mgmt'
+            _sub_device_mgmt_endpoint.endpoint_type = 'mgmt'
+
+            # add mgmt link
+            _mgmt_link_uuid = '{:s}/{:s}=={:s}/{:s}'.format(device_name, 'mgmt', _sub_device.name, 'mgmt')
+            _mgmt_link = Link()
+            _mgmt_link.link_id.link_uuid.uuid = _mgmt_link_uuid                         # pylint: disable=no-member
+            _mgmt_link.name = _mgmt_link_uuid
+            _mgmt_link.link_endpoint_ids.append(device_mgmt_endpoint.endpoint_id)       # pylint: disable=no-member
+            _mgmt_link.link_endpoint_ids.append(_sub_device_mgmt_endpoint.endpoint_id)  # pylint: disable=no-member
+            new_sub_links[_mgmt_link_uuid] = _mgmt_link
+
+        elif resource_key.startswith('/endpoints/endpoint'):
+            endpoint_uuid = resource_value['uuid']
+            _device_uuid = resource_value.get('device_uuid')
+            endpoint_name = resource_value.get('name')
+
+            if _device_uuid is None:
+                # add endpoint to current device
+                device_endpoint = device.device_endpoints.add()
+                device_endpoint.endpoint_id.device_id.device_uuid.uuid = device_uuid
+            else:
+                # add endpoint to specified device
+                device_endpoint = new_sub_devices[_device_uuid].device_endpoints.add()
+                device_endpoint.endpoint_id.device_id.device_uuid.uuid = _device_uuid
+
+            device_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
+            device_endpoint.endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME
+
+            device_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid
+            if endpoint_name is not None: device_endpoint.name = endpoint_name
+            device_endpoint.endpoint_type = resource_value.get('type', '-')
+
+            sample_types : Dict[int, str] = resource_value.get('sample_types', {})
+            for kpi_sample_type, monitor_resource_key in sample_types.items():
+                device_endpoint.kpi_sample_types.append(kpi_sample_type)
+                monitoring_loops.add_resource_key(device_uuid, endpoint_uuid, kpi_sample_type, monitor_resource_key)
+
+        elif resource_key.startswith('/links/link'):
+            # create sub-link
+            _sub_link_uuid = resource_value['uuid']
+            _sub_link = Link()
+            _sub_link.link_id.link_uuid.uuid = _sub_link_uuid           # pylint: disable=no-member
+            _sub_link.name = resource_value['name']
+            new_sub_links[_sub_link_uuid] = _sub_link
+
+            # use local names to avoid shadowing the enclosing device_uuid, which is
+            # still needed by later iterations of the outer loop
+            for _sl_device_uuid, _sl_endpoint_uuid in resource_value['endpoints']:
+                _sub_link_endpoint_id = _sub_link.link_endpoint_ids.add()      # pylint: disable=no-member
+                _sub_link_endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
+                _sub_link_endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME
+                _sub_link_endpoint_id.device_id.device_uuid.uuid = _sl_device_uuid
+                _sub_link_endpoint_id.endpoint_uuid.uuid = _sl_endpoint_uuid
 
-        sample_types : Dict[int, str] = resource_value.get('sample_types', {})
-        for kpi_sample_type, monitor_resource_key in sample_types.items():
-            device_endpoint.kpi_sample_types.append(kpi_sample_type)
-            monitoring_loops.add_resource_key(device_uuid, endpoint_uuid, kpi_sample_type, monitor_resource_key)
+        else:
+            errors.append(ERROR_UNSUP_RESOURCE.format(device_uuid=device_uuid, resource_data=str(resource_data)))
+            continue
 
     return errors
 
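
Sub-devices discovered through a controller carry a `_controller` custom config rule instead of a driver, and `get_device_controller_uuid()` parses it back. A self-contained sketch of that round trip, using plain dicts instead of the Device protobuf so it runs standalone:

```python
import json
from typing import Dict, List, Optional

def make_controller_rule(controller_uuid : str, controller_name : str) -> Dict:
    controller = {'name': controller_name, 'uuid': controller_uuid}
    return {'custom': {
        'resource_key'  : '_controller',
        'resource_value': json.dumps(controller, indent=0, sort_keys=True),
    }}

def get_controller_uuid(config_rules : List[Dict]) -> Optional[str]:
    # Mirrors get_device_controller_uuid(): the first '_controller' custom rule wins.
    for rule in config_rules:
        custom = rule.get('custom')
        if custom is None or custom.get('resource_key') != '_controller': continue
        return json.loads(custom['resource_value'])['uuid']
    return None

rules = [make_controller_rule('ctrl-uuid-1', 'main-controller')]
assert get_controller_uuid(rules) == 'ctrl-uuid-1'
```
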
diff --git a/src/device/service/driver_api/ImportTopologyEnum.py b/src/device/service/driver_api/ImportTopologyEnum.py
new file mode 100644
index 0000000000000000000000000000000000000000..06f0ff9c2db1f1baccc4b46c5babc4458ca6ffb6
--- /dev/null
+++ b/src/device/service/driver_api/ImportTopologyEnum.py
@@ -0,0 +1,37 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+from typing import Dict
+
+class ImportTopologyEnum(Enum):
+    # While importing underlying resources, the driver just imports endpoints and exposes them directly.
+    DISABLED = 'disabled'
+
+    # While importing underlying resources, the driver imports sub-devices but not the links
+    # connecting them. The endpoints are exposed in virtual nodes representing the sub-devices.
+    # (a remotely-controlled transport domain might exist between nodes)
+    DEVICES = 'devices'
+
+    # While importing underlying resources, the driver imports sub-devices and the links
+    # connecting them. The endpoints are exposed in virtual nodes representing the sub-devices.
+    # (enables defining constrained connectivity between the sub-devices)
+    TOPOLOGY = 'topology'
+
+def get_import_topology(settings : Dict, default : ImportTopologyEnum = ImportTopologyEnum.DISABLED) -> ImportTopologyEnum:
+    str_import_topology = settings.get('import_topology')
+    if str_import_topology is None: return default
+    import_topology = ImportTopologyEnum._value2member_map_.get(str_import_topology) # pylint: disable=no-member
+    if import_topology is None: raise Exception('Unexpected import_topology setting value: {:s}'.format(str_import_topology))
+    return import_topology
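
Example usage of the helper above; the settings dict is whatever the driver receives in `**settings`, and the values follow the enum:

```python
# An explicit setting selects the mode:
assert get_import_topology({'import_topology': 'devices'}) == ImportTopologyEnum.DEVICES

# A missing key falls back to the provided default (DISABLED unless overridden):
assert get_import_topology({}) == ImportTopologyEnum.DISABLED
assert get_import_topology({}, default=ImportTopologyEnum.TOPOLOGY) == ImportTopologyEnum.TOPOLOGY
```
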
diff --git a/src/device/service/driver_api/_Driver.py b/src/device/service/driver_api/_Driver.py
index cc9f7a2c63f2f841b864cbe4fa596464a6783cec..947bc8570a941f8f666c87647d89c315b1bd202a 100644
--- a/src/device/service/driver_api/_Driver.py
+++ b/src/device/service/driver_api/_Driver.py
@@ -22,6 +22,7 @@ RESOURCE_ENDPOINTS = '__endpoints__'
 RESOURCE_INTERFACES = '__interfaces__'
 RESOURCE_NETWORK_INSTANCES = '__network_instances__'
 RESOURCE_ROUTING_POLICIES = '__routing_policies__'
+RESOURCE_SERVICES = '__services__'
 RESOURCE_ACL = '__acl__'
 
 
diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py
index 469abcad387dc055ba17770e4f405db1d1ceaa3b..b3b485a471899dd96a4985fedb4bb6ede2432921 100644
--- a/src/device/service/drivers/__init__.py
+++ b/src/device/service/drivers/__init__.py
@@ -74,6 +74,15 @@ DRIVERS.append(
         #}
     ]))
 
+from .ietf_l2vpn.IetfL2VpnDriver import IetfL2VpnDriver # pylint: disable=wrong-import-position
+DRIVERS.append(
+    (IetfL2VpnDriver, [
+        {
+            FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.TERAFLOWSDN_CONTROLLER,
+            FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN,
+        }
+    ]))
+
 if LOAD_ALL_DEVICE_DRIVERS:
     from .openconfig.OpenConfigDriver import OpenConfigDriver # pylint: disable=wrong-import-position
     DRIVERS.append(
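
For context, each DRIVERS entry pairs a driver class with one or more filter-field sets; the device service instantiates the first class whose fields match the device. The stand-in below is a hedged sketch of that matching (the real selection logic lives in `driver_api` and may differ in detail; the values are illustrative):

```python
def matches(filter_fields : dict, device_type : str, device_drivers : set) -> bool:
    # A filter-field set matches when every constrained field matches the device.
    wanted_type   = filter_fields.get('device_type')
    wanted_driver = filter_fields.get('driver')
    if wanted_type is not None and device_type != wanted_type: return False
    if wanted_driver is not None and wanted_driver not in device_drivers: return False
    return True

ietf_l2vpn_filter = {'device_type': 'teraflowsdn', 'driver': 'ietf_l2vpn'}
assert matches(ietf_l2vpn_filter, 'teraflowsdn', {'ietf_l2vpn'})
assert not matches(ietf_l2vpn_filter, 'emu-packet-router', {'undefined'})
```
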
diff --git a/src/device/service/drivers/emulated/EmulatedDriver.py b/src/device/service/drivers/emulated/EmulatedDriver.py
index 14925f9f78d143cd998065a43afb624b20c04bfb..2acb288784d6da5b202f14c2534ee1a59486a20e 100644
--- a/src/device/service/drivers/emulated/EmulatedDriver.py
+++ b/src/device/service/drivers/emulated/EmulatedDriver.py
@@ -19,7 +19,7 @@ from apscheduler.executors.pool import ThreadPoolExecutor
 from apscheduler.job import Job
 from apscheduler.jobstores.memory import MemoryJobStore
 from apscheduler.schedulers.background import BackgroundScheduler
-from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
 from common.type_checkers.Checkers import chk_float, chk_length, chk_string, chk_type
 from device.service.driver_api._Driver import _Driver
 from device.service.driver_api.AnyTreeTools import TreeNode, dump_subtree, get_subnode, set_subnode_value
@@ -31,23 +31,7 @@ LOGGER = logging.getLogger(__name__)
 
 RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r'^\/interface\[([^\]]+)\].*')
 
-HISTOGRAM_BUCKETS = (
-    # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
-    0.0001, 0.00025, 0.00050, 0.00075,
-    0.0010, 0.0025, 0.0050, 0.0075,
-    0.0100, 0.0250, 0.0500, 0.0750,
-    0.1000, 0.2500, 0.5000, 0.7500,
-    1.0000, 2.5000, 5.0000, 7.5000,
-    10.0, 25.0, 50.0, 75.0,
-    100.0, INF
-)
 METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'emulated'})
-METRICS_POOL.get_or_create('GetInitialConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('GetConfig',        MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SetConfig',        MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteConfig',     MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SubscribeState',   MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('UnsubscribeState', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
 
 class EmulatedDriver(_Driver):
     def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called
diff --git a/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py b/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py
new file mode 100644
index 0000000000000000000000000000000000000000..96dfd2c15f6b359e254a6d6a24dfe42a546833ce
--- /dev/null
+++ b/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py
@@ -0,0 +1,188 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, threading
+from typing import Any, Iterator, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.EndPoint import json_endpoint_id
+from common.type_checkers.Checkers import chk_string, chk_type
+from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES
+from device.service.drivers.ietf_l2vpn.TfsDebugApiClient import TfsDebugApiClient
+from .Tools import connection_point, wim_mapping
+from .WimconnectorIETFL2VPN import WimconnectorIETFL2VPN
+
+LOGGER = logging.getLogger(__name__)
+
+def service_exists(wim : WimconnectorIETFL2VPN, service_uuid : str) -> bool:
+    try:
+        wim.get_connectivity_service_status(service_uuid)
+        return True
+    except: # pylint: disable=bare-except
+        return False
+
+ALL_RESOURCE_KEYS = [
+    RESOURCE_ENDPOINTS,
+    RESOURCE_SERVICES,
+]
+
+SERVICE_TYPE = 'ELINE'
+
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'ietf_l2vpn'})
+
+class IetfL2VpnDriver(_Driver):
+    def __init__(self, address: str, port: int, **settings) -> None:    # pylint: disable=super-init-not-called
+        self.__lock = threading.Lock()
+        self.__started = threading.Event()
+        self.__terminate = threading.Event()
+        username = settings.get('username')
+        password = settings.get('password')
+        scheme = settings.get('scheme', 'http')
+        wim = {'wim_url': '{:s}://{:s}:{:d}'.format(scheme, address, int(port))}
+        wim_account = {'user': username, 'password': password}
+        # Mapping updated dynamically with each request
+        config = {'mapping_not_needed': False, 'service_endpoint_mapping': []}
+        self.dac = TfsDebugApiClient(address, int(port), scheme=scheme, username=username, password=password)
+        self.wim = WimconnectorIETFL2VPN(wim, wim_account, config=config)
+        self.conn_info = {} # internal database emulating OSM storage provided to WIM Connectors
+
+    def Connect(self) -> bool:
+        with self.__lock:
+            try:
+                self.wim.check_credentials()
+            except Exception:  # pylint: disable=broad-except
+                LOGGER.exception('Exception checking credentials')
+                return False
+            else:
+                self.__started.set()
+                return True
+
+    def Disconnect(self) -> bool:
+        with self.__lock:
+            self.__terminate.set()
+            return True
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetInitialConfig(self) -> List[Tuple[str, Any]]:
+        with self.__lock:
+            return []
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
+        chk_type('resources', resource_keys, list)
+        results = []
+        with self.__lock:
+            self.wim.check_credentials()
+            if len(resource_keys) == 0: resource_keys = ALL_RESOURCE_KEYS
+            for i, resource_key in enumerate(resource_keys):
+                str_resource_name = 'resource_key[#{:d}]'.format(i)
+                try:
+                    chk_string(str_resource_name, resource_key, allow_empty=False)
+                    if resource_key == RESOURCE_ENDPOINTS:
+                        # return endpoints through debug-api and list-devices method
+                        results.extend(self.dac.get_devices_endpoints())
+                    elif resource_key == RESOURCE_SERVICES:
+                        # return all active services through the WIM connector
+                        reply = self.wim.get_all_active_connectivity_services()
+                        results.extend(reply.json())
+                    else:
+                        # assume single-service retrieval
+                        reply = self.wim.get_connectivity_service(resource_key)
+                        results.append(reply.json())
+                except Exception as e: # pylint: disable=broad-except
+                    LOGGER.exception('Unhandled error processing resource_key({:s})'.format(str(resource_key)))
+                    results.append((resource_key, e))
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        results = []
+        if len(resources) == 0: return results
+        with self.__lock:
+            self.wim.check_credentials()
+            for resource in resources:
+                LOGGER.info('resource = {:s}'.format(str(resource)))
+                resource_key, resource_value = resource
+                try:
+                    resource_value = json.loads(resource_value)
+                    service_uuid = resource_value['uuid']
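+                    # Illustrative resource_value shape (assumed example values;
+                    # field names match the accesses below):
+                    #   {'uuid': 'svc-1', 'src_device_name': 'R1', 'src_endpoint_name': '1/1',
+                    #    'dst_device_name': 'R2', 'dst_endpoint_name': '1/2',
+                    #    'encapsulation_type': 'dot1q', 'vlan_id': 400}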
+
+                    if service_exists(self.wim, service_uuid):
+                        exc = NotImplementedError('IETF L2VPN Service Update is not yet supported')
+                        results.append((resource_key, exc))
+                        continue
+
+                    src_device_name   = resource_value['src_device_name']
+                    src_endpoint_name = resource_value['src_endpoint_name']
+                    dst_device_name   = resource_value['dst_device_name']
+                    dst_endpoint_name = resource_value['dst_endpoint_name']
+                    encap_type        = resource_value['encapsulation_type']
+                    vlan_id           = resource_value['vlan_id']
+
+                    src_endpoint_id = json_endpoint_id(json_device_id(src_device_name), src_endpoint_name)
+                    src_service_endpoint_id, src_mapping = wim_mapping('1', src_endpoint_id)
+                    self.wim.mappings[src_service_endpoint_id] = src_mapping
+
+                    dst_endpoint_id = json_endpoint_id(json_device_id(dst_device_name), dst_endpoint_name)
+                    dst_service_endpoint_id, dst_mapping = wim_mapping('2', dst_endpoint_id)
+                    self.wim.mappings[dst_service_endpoint_id] = dst_mapping
+
+                    connection_points = [
+                        connection_point(src_service_endpoint_id, encap_type, vlan_id),
+                        connection_point(dst_service_endpoint_id, encap_type, vlan_id),
+                    ]
+
+                    self.wim.create_connectivity_service(service_uuid, SERVICE_TYPE, connection_points)
+                    results.append((resource_key, True))
+                except Exception as e: # pylint: disable=broad-except
+                    LOGGER.exception('Unhandled error processing resource_key({:s})'.format(str(resource_key)))
+                    results.append((resource_key, e))
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        results = []
+        if len(resources) == 0: return results
+        with self.__lock:
+            self.wim.check_credentials()
+            for resource in resources:
+                LOGGER.info('resource = {:s}'.format(str(resource)))
+                resource_key, resource_value = resource
+                try:
+                    resource_value = json.loads(resource_value)
+                    service_uuid = resource_value['uuid']
+
+                    if service_exists(self.wim, service_uuid):
+                        self.wim.delete_connectivity_service(service_uuid)
+                    results.append((resource_key, True))
+                except Exception as e: # pylint: disable=broad-except
+                    LOGGER.exception('Unhandled error processing resource_key({:s})'.format(str(resource_key)))
+                    results.append((resource_key, e))
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+        # TODO: IETF L2VPN does not support monitoring for now
+        return [False for _ in subscriptions]
+
+    @metered_subclass_method(METRICS_POOL)
+    def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+        # TODO: IETF L2VPN does not support monitoring for now
+        return [False for _ in subscriptions]
+
+    def GetState(
+        self, blocking=False, terminate : Optional[threading.Event] = None
+    ) -> Iterator[Tuple[float, str, Any]]:
+        # TODO: IETF L2VPN does not support monitoring for now
+        return []
diff --git a/src/device/service/drivers/ietf_l2vpn/TfsDebugApiClient.py b/src/device/service/drivers/ietf_l2vpn/TfsDebugApiClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bf40af030fda990f96efe0ff8ab2ce54f82c312
--- /dev/null
+++ b/src/device/service/drivers/ietf_l2vpn/TfsDebugApiClient.py
@@ -0,0 +1,92 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, requests
+from requests.auth import HTTPBasicAuth
+from typing import Dict, List, Optional
+
+GET_DEVICES_URL = '{:s}://{:s}:{:d}/restconf/debug-api/devices'
+TIMEOUT = 30
+
+HTTP_OK_CODES = {
+    200,    # OK
+    201,    # Created
+    202,    # Accepted
+    204,    # No Content
+}
+
+MAPPING_STATUS = {
+    'DEVICEOPERATIONALSTATUS_UNDEFINED': 0,
+    'DEVICEOPERATIONALSTATUS_DISABLED' : 1,
+    'DEVICEOPERATIONALSTATUS_ENABLED'  : 2,
+}
+
+MAPPING_DRIVER = {
+    'DEVICEDRIVER_UNDEFINED'            : 0,
+    'DEVICEDRIVER_OPENCONFIG'           : 1,
+    'DEVICEDRIVER_TRANSPORT_API'        : 2,
+    'DEVICEDRIVER_P4'                   : 3,
+    'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY': 4,
+    'DEVICEDRIVER_ONF_TR_352'           : 5,
+    'DEVICEDRIVER_XR'                   : 6,
+    'DEVICEDRIVER_IETF_L2VPN'           : 7,
+}
+
+MSG_ERROR = 'Could not retrieve devices in remote TeraFlowSDN instance({:s}). status_code={:s} reply={:s}'
+
+LOGGER = logging.getLogger(__name__)
+
+class TfsDebugApiClient:
+    def __init__(
+        self, address : str, port : int, scheme : str = 'http',
+        username : Optional[str] = None, password : Optional[str] = None
+    ) -> None:
+        self._url = GET_DEVICES_URL.format(scheme, address, port)
+        self._auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None
+
+    def get_devices_endpoints(self) -> List[Dict]:
+        reply = requests.get(self._url, timeout=TIMEOUT, verify=False, auth=self._auth)
+        if reply.status_code not in HTTP_OK_CODES:
+            msg = MSG_ERROR.format(str(self._url), str(reply.status_code), str(reply))
+            LOGGER.error(msg)
+            raise Exception(msg)
+
+        result = list()
+        for json_device in reply.json()['devices']:
+            device_uuid : str = json_device['device_id']['device_uuid']['uuid']
+            device_type : str = json_device['device_type']
+            #if not device_type.startswith('emu-'): device_type = 'emu-' + device_type
+            device_status = json_device['device_operational_status']
+            device_url = '/devices/device[{:s}]'.format(device_uuid)
+            device_data = {
+                'uuid': device_uuid,
+                'name': json_device['name'],
+                'type': device_type,
+                'status': MAPPING_STATUS[device_status],
+                'drivers': [MAPPING_DRIVER[driver] for driver in json_device['device_drivers']],
+            }
+            result.append((device_url, device_data))
+
+            for json_endpoint in json_device['device_endpoints']:
+                endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']['uuid']
+                endpoint_url = '/endpoints/endpoint[{:s}]'.format(endpoint_uuid)
+                endpoint_data = {
+                    'device_uuid': device_uuid,
+                    'uuid': endpoint_uuid,
+                    'name': json_endpoint['name'],
+                    'type': json_endpoint['endpoint_type'],
+                }
+                result.append((endpoint_url, endpoint_data))
+
+        return result
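+
+# Illustrative return value of get_devices_endpoints() (shape only; names and
+# UUIDs are assumed example values):
+#   [('/devices/device[dev-1]',
+#     {'uuid': 'dev-1', 'name': 'R1', 'type': 'packet-router', 'status': 2, 'drivers': [1]}),
+#    ('/endpoints/endpoint[ep-1]',
+#     {'device_uuid': 'dev-1', 'uuid': 'ep-1', 'name': '1/1', 'type': 'copper'})]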
diff --git a/src/device/service/drivers/ietf_l2vpn/Tools.py b/src/device/service/drivers/ietf_l2vpn/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..45dfa23c984e175c01efa77371e94454b98ea94e
--- /dev/null
+++ b/src/device/service/drivers/ietf_l2vpn/Tools.py
@@ -0,0 +1,48 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Optional
+
+def compose_service_endpoint_id(site_id : str, endpoint_id : Dict):
+    device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
+    endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
+    return ':'.join([site_id, device_uuid, endpoint_uuid])
+
+def wim_mapping(site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, priority=None, redundant=[]):
+    ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid']
+    ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid']
+    service_endpoint_id = compose_service_endpoint_id(site_id, ce_endpoint_id)
+    if pe_device_id is None:
+        bearer = '{:s}:{:s}'.format(ce_device_uuid, ce_endpoint_uuid)
+    else:
+        pe_device_uuid = pe_device_id['device_uuid']['uuid']
+        bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid)
+    mapping = {
+        'service_endpoint_id': service_endpoint_id,
+        'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid,
+        'service_mapping_info': {
+            'site-id': site_id,
+            'bearer': {'bearer-reference': bearer},
+        }
+    }
+    if priority is not None: mapping['service_mapping_info']['priority'] = priority
+    if len(redundant) > 0: mapping['service_mapping_info']['redundant'] = redundant
+    return service_endpoint_id, mapping
+
+def connection_point(service_endpoint_id : str, encapsulation_type : str, vlan_id : int):
+    return {
+        'service_endpoint_id': service_endpoint_id,
+        'service_endpoint_encapsulation_type': encapsulation_type,
+        'service_endpoint_encapsulation_info': {'vlan': vlan_id}
+    }
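+
+# Illustrative usage (assumed example values): for site_id '1', CE device 'R1'
+# and endpoint '1/1', wim_mapping('1', endpoint_id) returns the key '1:R1:1/1'
+# together with a mapping whose bearer-reference is 'R1:1/1'; connection_point()
+# then wraps that key with its encapsulation, e.g.:
+#   connection_point('1:R1:1/1', 'dot1q', 400)
+#   -> {'service_endpoint_id': '1:R1:1/1',
+#       'service_endpoint_encapsulation_type': 'dot1q',
+#       'service_endpoint_encapsulation_info': {'vlan': 400}}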
diff --git a/src/device/service/drivers/ietf_l2vpn/WimconnectorIETFL2VPN.py b/src/device/service/drivers/ietf_l2vpn/WimconnectorIETFL2VPN.py
new file mode 100644
index 0000000000000000000000000000000000000000..34ff184c022f379e7420de237bd08fc1dc6282a6
--- /dev/null
+++ b/src/device/service/drivers/ietf_l2vpn/WimconnectorIETFL2VPN.py
@@ -0,0 +1,565 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 Telefonica
+# All Rights Reserved.
+#
+# Contributors: Oscar Gonzalez de Dios, Manuel Lopez Bravo, Guillermo Pajares Martin
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This work has been performed in the context of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 program.
+##
+"""The SDN/WIM connector is responsible for establishing wide area network
+connectivity.
+
+This SDN/WIM connector implements the standard IETF RFC 8466 "A YANG Data
+ Model for Layer 2 Virtual Private Network (L2VPN) Service Delivery"
+
+It receives the endpoints and the necessary details to request
+the Layer 2 service.
+"""
+import requests
+import uuid
+import logging
+import copy
+#from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
+from .sdnconn import SdnConnectorBase, SdnConnectorError
+
+"""Check layer where we move it"""
+
+
+class WimconnectorIETFL2VPN(SdnConnectorBase):
+    def __init__(self, wim, wim_account, config=None, logger=None):
+        """IETF L2VPN WIM connector
+
+        Arguments: (To be completed)
+            wim (dict): WIM record, as stored in the database
+            wim_account (dict): WIM account record, as stored in the database
+        """
+        self.logger = logging.getLogger("ro.sdn.ietfl2vpn")
+        super().__init__(wim, wim_account, config, logger)
+        self.headers = {"Content-Type": "application/json"}
+        self.mappings = {
+            m["service_endpoint_id"]: m for m in self.service_endpoint_mapping
+        }
+        self.user = wim_account.get("user")
+        self.passwd = wim_account.get("password")
+
+        if self.user is not None and self.passwd is not None:
+            self.auth = (self.user, self.passwd)
+        else:
+            self.auth = None
+
+        self.logger.info("IETFL2VPN Connector Initialized.")
+
+    def check_credentials(self):
+        endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+            self.wim["wim_url"]
+        )
+
+        try:
+            response = requests.get(endpoint, auth=self.auth)
+            http_code = response.status_code
+        except requests.exceptions.RequestException as e:
+            raise SdnConnectorError(str(e), http_code=503)
+
+        if http_code != 200:
+            raise SdnConnectorError("Failed while authenticating", http_code=http_code)
+
+        self.logger.info("Credentials checked")
+
+    def get_connectivity_service_status(self, service_uuid, conn_info=None):
+        """Monitor the status of the connectivity service stablished
+
+        Arguments:
+            service_uuid: Connectivity service unique identifier
+
+        Returns:
+            Examples::
+                {'sdn_status': 'ACTIVE'}
+                {'sdn_status': 'INACTIVE'}
+                {'sdn_status': 'DOWN'}
+                {'sdn_status': 'ERROR'}
+        """
+        try:
+            self.logger.info("Sending get connectivity service stuatus")
+            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+                self.wim["wim_url"], service_uuid
+            )
+            response = requests.get(servicepoint, auth=self.auth)
+            self.logger.warning('response.status_code={:s}'.format(str(response.status_code)))
+            if response.status_code != requests.codes.ok:
+                raise SdnConnectorError(
+                    "Unable to obtain connectivity servcice status",
+                    http_code=response.status_code,
+                )
+
+            service_status = {"sdn_status": "ACTIVE"}
+
+            return service_status
+        except requests.exceptions.ConnectionError:
+            raise SdnConnectorError("Request Timeout", http_code=408)
+
+    def search_mapp(self, connection_point):
+        endpoint_id = connection_point["service_endpoint_id"]
+        if endpoint_id not in self.mappings:
+            raise SdnConnectorError("Endpoint {} not located".format(str(endpoint_id)))
+        else:
+            return self.mappings[endpoint_id]
+
+    def create_connectivity_service(self, service_uuid, service_type, connection_points, **kwargs):
+        """Stablish WAN connectivity between the endpoints
+
+        Arguments:
+            service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
+                ``L3``.
+            connection_points (list): each point corresponds to
+                an entry point from the DC to the transport network. One
+                connection point serves to identify the specific access and
+                some other service parameters, such as encapsulation type.
+                Represented by a dict as follows::
+
+                    {
+                      "service_endpoint_id": ..., (str[uuid])
+                      "service_endpoint_encapsulation_type": ...,
+                           (enum: none, dot1q, ...)
+                      "service_endpoint_encapsulation_info": {
+                        ... (dict)
+                        "vlan": ..., (int, present if encapsulation is dot1q)
+                        "vni": ... (int, present if encapsulation is vxlan),
+                        "peers": [(ipv4_1), (ipv4_2)]
+                            (present if encapsulation is vxlan)
+                      }
+                    }
+
+              The service endpoint ID must have been previously provided to the
+              WIM engine in the RO when the WIM port mapping was registered.
+
+        Keyword Arguments:
+            bandwidth (int): value in kilobytes
+            latency (int): value in milliseconds
+
+        Other QoS might be passed as keyword arguments.
+
+        Returns:
+            conn_info (list or None): Information to be stored at the
+                database (or ``None``). This information will be provided to
+                :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
+                **MUST** be JSON/YAML-serializable (plain data structures).
+
+        Raises:
+            SdnConnectorException: In case of error.
+        """
+        SETTINGS = {    # min_endpoints, max_endpoints, vpn_service_type
+            'ELINE': (2,    2, 'vpws'), # Virtual Private Wire Service
+            'ELAN' : (2, None, 'vpls'), # Virtual Private LAN  Service
+        }
+        settings = SETTINGS.get(service_type)
+        if settings is None: raise NotImplementedError('Unsupported service_type({:s})'.format(str(service_type)))
+        min_endpoints, max_endpoints, vpn_service_type = settings
+
+        if max_endpoints is not None and len(connection_points) > max_endpoints:
+            msg = "Connections between more than {:d} endpoints are not supported for service_type {:s}"
+            raise SdnConnectorError(msg.format(max_endpoints, service_type))
+
+        if min_endpoints is not None and len(connection_points) < min_endpoints:
+            msg = "Connections must be of at least {:d} endpoints for service_type {:s}"
+            raise SdnConnectorError(msg.format(min_endpoints, service_type))
+
+        """First step, create the vpn service"""
+        vpn_service = {}
+        vpn_service["vpn-id"] = service_uuid
+        vpn_service["vpn-svc-type"] = vpn_service_type
+        vpn_service["svc-topo"] = "any-to-any"
+        vpn_service["customer-name"] = "osm"
+        vpn_service_list = []
+        vpn_service_list.append(vpn_service)
+        vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list}
+        response_service_creation = None
+        conn_info = []
+        self.logger.info("Sending vpn-service :{}".format(vpn_service_l))
+
+        try:
+            endpoint_service_creation = (
+                "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+                    self.wim["wim_url"]
+                )
+            )
+            response_service_creation = requests.post(
+                endpoint_service_creation,
+                headers=self.headers,
+                json=vpn_service_l,
+                auth=self.auth,
+            )
+        except requests.exceptions.ConnectionError:
+            raise SdnConnectorError(
+                "Request to create service Timeout", http_code=408
+            )
+
+        if response_service_creation.status_code == 409:
+            raise SdnConnectorError(
+                "Service already exists",
+                http_code=response_service_creation.status_code,
+            )
+        elif response_service_creation.status_code != requests.codes.created:
+            raise SdnConnectorError(
+                "Request to create service not accepted",
+                http_code=response_service_creation.status_code,
+            )
+
+        self.logger.info('connection_points = {:s}'.format(str(connection_points)))
+
+        # Check if protected paths are requested
+        extended_connection_points = []
+        for connection_point in connection_points:
+            extended_connection_points.append(connection_point)
+
+            connection_point_wan_info = self.search_mapp(connection_point)
+            service_mapping_info = connection_point_wan_info.get('service_mapping_info', {})
+            redundant_service_endpoint_ids = service_mapping_info.get('redundant')
+
+            if redundant_service_endpoint_ids is None: continue
+            if len(redundant_service_endpoint_ids) == 0: continue
+
+            for redundant_service_endpoint_id in redundant_service_endpoint_ids:
+                redundant_connection_point = copy.deepcopy(connection_point)
+                redundant_connection_point['service_endpoint_id'] = redundant_service_endpoint_id
+                extended_connection_points.append(redundant_connection_point)
+
+        self.logger.info('extended_connection_points = {:s}'.format(str(extended_connection_points)))
+
+        """Second step, create the connections and vpn attachments"""
+        for connection_point in extended_connection_points:
+            connection_point_wan_info = self.search_mapp(connection_point)
+            site_network_access = {}
+            connection = {}
+
+            if connection_point["service_endpoint_encapsulation_type"] != "none":
+                if (
+                    connection_point["service_endpoint_encapsulation_type"]
+                    == "dot1q"
+                ):
+                    """The connection is a VLAN"""
+                    connection["encapsulation-type"] = "dot1q-vlan-tagged"
+                    tagged = {}
+                    tagged_interf = {}
+                    service_endpoint_encapsulation_info = connection_point[
+                        "service_endpoint_encapsulation_info"
+                    ]
+
+                    if service_endpoint_encapsulation_info["vlan"] is None:
+                        raise SdnConnectorError("VLAN must be provided")
+
+                    tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[
+                        "vlan"
+                    ]
+                    tagged["dot1q-vlan-tagged"] = tagged_interf
+                    connection["tagged-interface"] = tagged
+                else:
+                    raise NotImplementedError("Encapsulation type not implemented")
+
+            site_network_access["connection"] = connection
+            self.logger.info("Sending connection:{}".format(connection))
+            vpn_attach = {}
+            vpn_attach["vpn-id"] = service_uuid
+            vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role"
+            site_network_access["vpn-attachment"] = vpn_attach
+            self.logger.info("Sending vpn-attachement :{}".format(vpn_attach))
+            uuid_sna = str(uuid.uuid4())
+            site_network_access["network-access-id"] = uuid_sna
+            site_network_access["bearer"] = connection_point_wan_info[
+                "service_mapping_info"
+            ]["bearer"]
+
+            access_priority = connection_point_wan_info["service_mapping_info"].get("priority")
+            if access_priority is not None:
+                availability = {}
+                availability["access-priority"] = access_priority
+                availability["single-active"] = [None]
+                site_network_access["availability"] = availability
+
+                constraint = {}
+                constraint['constraint-type'] = 'end-to-end-diverse'
+                constraint['target'] = {'all-other-accesses': [None]}
+
+                access_diversity = {}
+                access_diversity['constraints'] = {'constraint': []}
+                access_diversity['constraints']['constraint'].append(constraint)
+                site_network_access["access-diversity"] = access_diversity
+
+            site_network_accesses = {}
+            site_network_access_list = []
+            site_network_access_list.append(site_network_access)
+            site_network_accesses[
+                "ietf-l2vpn-svc:site-network-access"
+            ] = site_network_access_list
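+            # Build the per-access conn_info entry; each one resembles
+            # (illustrative values):
+            #   {'site': '1', 'site-network-access-id': '<uuid4>', 'mapping': None}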
+            conn_info_d = {}
+            conn_info_d["site"] = connection_point_wan_info["service_mapping_info"][
+                "site-id"
+            ]
+            conn_info_d["site-network-access-id"] = site_network_access[
+                "network-access-id"
+            ]
+            conn_info_d["mapping"] = None
+            conn_info.append(conn_info_d)
+
+            try:
+                endpoint_site_network_access_creation = (
+                    "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/"
+                    "sites/site={}/site-network-accesses/".format(
+                        self.wim["wim_url"],
+                        connection_point_wan_info["service_mapping_info"][
+                            "site-id"
+                        ],
+                    )
+                )
+                response_endpoint_site_network_access_creation = requests.post(
+                    endpoint_site_network_access_creation,
+                    headers=self.headers,
+                    json=site_network_accesses,
+                    auth=self.auth,
+                )
+
+                if (
+                    response_endpoint_site_network_access_creation.status_code
+                    == 409
+                ):
+                    self.delete_connectivity_service(vpn_service["vpn-id"])
+
+                    raise SdnConnectorError(
+                        "Site_Network_Access with ID '{}' already exists".format(
+                            site_network_access["network-access-id"]
+                        ),
+                        http_code=response_endpoint_site_network_access_creation.status_code,
+                    )
+                elif (
+                    response_endpoint_site_network_access_creation.status_code
+                    == 400
+                ):
+                    self.delete_connectivity_service(vpn_service["vpn-id"])
+
+                    raise SdnConnectorError(
+                        "Site {} does not exist".format(
+                            connection_point_wan_info["service_mapping_info"][
+                                "site-id"
+                            ]
+                        ),
+                        http_code=response_endpoint_site_network_access_creation.status_code,
+                    )
+                elif (
+                    response_endpoint_site_network_access_creation.status_code
+                    != requests.codes.created
+                    and response_endpoint_site_network_access_creation.status_code
+                    != requests.codes.no_content
+                ):
+                    self.delete_connectivity_service(vpn_service["vpn-id"])
+
+                    raise SdnConnectorError(
+                        "Request not accepted",
+                        http_code=response_endpoint_site_network_access_creation.status_code,
+                    )
+            except requests.exceptions.ConnectionError:
+                self.delete_connectivity_service(vpn_service["vpn-id"])
+
+                raise SdnConnectorError("Request Timeout", http_code=408)
+
+        return conn_info
+
+    def delete_connectivity_service(self, service_uuid, conn_info=None):
+        """Disconnect multi-site endpoints previously connected
+
+        This method should receive as the first argument the UUID generated by
+        the ``create_connectivity_service``
+        """
+        try:
+            self.logger.info("Sending delete")
+            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+                self.wim["wim_url"], service_uuid
+            )
+            response = requests.delete(servicepoint, auth=self.auth)
+
+            if response.status_code != requests.codes.no_content:
+                raise SdnConnectorError(
+                    "Error in the request", http_code=response.status_code
+                )
+        except requests.exceptions.ConnectionError:
+            raise SdnConnectorError("Request Timeout", http_code=408)
+
+    def edit_connectivity_service(
+        self, service_uuid, conn_info=None, connection_points=None, **kwargs
+    ):
+        """Change an existing connectivity service, see
+        ``create_connectivity_service``"""
+        # sites = {"sites": {}}
+        # site_list = []
+        vpn_service = {}
+        vpn_service["svc-topo"] = "any-to-any"
+        counter = 0
+
+        for connection_point in connection_points:
+            site_network_access = {}
+            connection_point_wan_info = self.search_mapp(connection_point)
+            params_site = {}
+            params_site["site-id"] = connection_point_wan_info["service_mapping_info"][
+                "site-id"
+            ]
+            params_site["site-vpn-flavor"] = "site-vpn-flavor-single"
+            device_site = {}
+            device_site["device-id"] = connection_point_wan_info["device-id"]
+            params_site["devices"] = device_site
+            # network_access = {}
+            connection = {}
+
+            if connection_point["service_endpoint_encapsulation_type"] != "none":
+                if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
+                    """The connection is a VLAN"""
+                    connection["encapsulation-type"] = "dot1q-vlan-tagged"
+                    tagged = {}
+                    tagged_interf = {}
+                    service_endpoint_encapsulation_info = connection_point[
+                        "service_endpoint_encapsulation_info"
+                    ]
+
+                    if service_endpoint_encapsulation_info["vlan"] is None:
+                        raise SdnConnectorError("VLAN must be provided")
+
+                    tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[
+                        "vlan"
+                    ]
+                    tagged["dot1q-vlan-tagged"] = tagged_interf
+                    connection["tagged-interface"] = tagged
+                else:
+                    raise NotImplementedError("Encapsulation type not implemented")
+
+            site_network_access["connection"] = connection
+            vpn_attach = {}
+            vpn_attach["vpn-id"] = service_uuid
+            vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role"
+            site_network_access["vpn-attachment"] = vpn_attach
+            uuid_sna = conn_info[counter]["site-network-access-id"]
+            site_network_access["network-access-id"] = uuid_sna
+            site_network_access["bearer"] = connection_point_wan_info[
+                "service_mapping_info"
+            ]["bearer"]
+            site_network_accesses = {}
+            site_network_access_list = []
+            site_network_access_list.append(site_network_access)
+            site_network_accesses[
+                "ietf-l2vpn-svc:site-network-access"
+            ] = site_network_access_list
+
+            try:
+                endpoint_site_network_access_edit = (
+                    "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/"
+                    "sites/site={}/site-network-accesses/".format(
+                        self.wim["wim_url"],
+                        connection_point_wan_info["service_mapping_info"]["site-id"],
+                    )
+                )
+                response_endpoint_site_network_access_creation = requests.put(
+                    endpoint_site_network_access_edit,
+                    headers=self.headers,
+                    json=site_network_accesses,
+                    auth=self.auth,
+                )
+
+                if response_endpoint_site_network_access_creation.status_code == 400:
+                    raise SdnConnectorError(
+                        "Service does not exist",
+                        http_code=response_endpoint_site_network_access_creation.status_code,
+                    )
+                elif (
+                    response_endpoint_site_network_access_creation.status_code != 201
+                    and response_endpoint_site_network_access_creation.status_code
+                    != 204
+                ):
+                    raise SdnConnectorError(
+                        "Request no accepted",
+                        http_code=response_endpoint_site_network_access_creation.status_code,
+                    )
+            except requests.exceptions.ConnectionError:
+                raise SdnConnectorError("Request Timeout", http_code=408)
+
+            counter += 1
+
+        return None
+
+    def clear_all_connectivity_services(self):
+        """Delete all WAN Links corresponding to a WIM"""
+        try:
+            self.logger.info("Sending clear all connectivity services")
+            servicepoint = (
+                "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+                    self.wim["wim_url"]
+                )
+            )
+            response = requests.delete(servicepoint, auth=self.auth)
+
+            if response.status_code != requests.codes.no_content:
+                raise SdnConnectorError(
+                    "Unable to clear all connectivity services",
+                    http_code=response.status_code,
+                )
+        except requests.exceptions.ConnectionError:
+            raise SdnConnectorError("Request Timeout", http_code=408)
+
+    def get_all_active_connectivity_services(self):
+        """Provide information about all active connections provisioned by a
+        WIM
+        """
+        try:
+            self.logger.info("Sending get all connectivity services")
+            servicepoint = (
+                "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+                    self.wim["wim_url"]
+                )
+            )
+            response = requests.get(servicepoint, auth=self.auth)
+
+            if response.status_code != requests.codes.ok:
+                raise SdnConnectorError(
+                    "Unable to get all connectivity services",
+                    http_code=response.status_code,
+                )
+
+            return response
+        except requests.exceptions.ConnectionError:
+            raise SdnConnectorError("Request Timeout", http_code=408)
+
+    def get_connectivity_service(self, service_uuid, conn_info=None):
+        """Provide information about a specific connection provisioned by a WIM.
+
+        This method should receive as the first argument the UUID generated by
+        the ``create_connectivity_service``
+        """
+        try:
+            self.logger.info("Sending get connectivity service")
+            servicepoint = (
+                "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+                    self.wim["wim_url"], service_uuid
+                )
+            )
+            response = requests.get(servicepoint, auth=self.auth)
+
+            if response.status_code != requests.codes.ok:
+                raise SdnConnectorError(
+                    "Unable to get connectivity service {:s}".format(str(service_uuid)),
+                    http_code=response.status_code,
+                )
+
+            return response
+        except requests.exceptions.ConnectionError:
+            raise SdnConnectorError("Request Timeout", http_code=408)
diff --git a/src/device/service/drivers/ietf_l2vpn/__init__.py b/src/device/service/drivers/ietf_l2vpn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..38d04994fb0fa1951fb465bc127eb72659dc2eaf
--- /dev/null
+++ b/src/device/service/drivers/ietf_l2vpn/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/device/service/drivers/ietf_l2vpn/acknowledgements.txt b/src/device/service/drivers/ietf_l2vpn/acknowledgements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3a7ed47ad6626ad13f4176bd696ec7e7dbab20ee
--- /dev/null
+++ b/src/device/service/drivers/ietf_l2vpn/acknowledgements.txt
@@ -0,0 +1,3 @@
+IETF L2VPN Driver is based on source code taken from:
+https://osm.etsi.org/gitlab/osm/ro/-/blob/master/RO-plugin/osm_ro_plugin/sdnconn.py
+https://osm.etsi.org/gitlab/osm/ro/-/blob/master/RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn/wimconn_ietfl2vpn.py
diff --git a/src/device/service/drivers/ietf_l2vpn/sdnconn.py b/src/device/service/drivers/ietf_l2vpn/sdnconn.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1849c9ef3e1a1260ff42bbadabc99f91a6435d7
--- /dev/null
+++ b/src/device/service/drivers/ietf_l2vpn/sdnconn.py
@@ -0,0 +1,242 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+"""The SDN connector is responsible for establishing both wide area network connectivity (WIM)
+and intranet SDN connectivity.
+
+It receives information about the ports to be connected.
+"""
+
+import logging
+from http import HTTPStatus
+
+
+class SdnConnectorError(Exception):
+    """Base Exception for all connector related errors
+    provide the parameter 'http_code' (int) with the error code:
+        Bad_Request = 400
+        Unauthorized = 401  (e.g. credentials are not valid)
+        Not_Found = 404    (e.g. trying to edit or delete a non-existent connectivity service)
+        Forbidden = 403
+        Method_Not_Allowed = 405
+        Not_Acceptable = 406
+        Request_Timeout = 408  (e.g. timeout reaching the server, or the server cannot be reached)
+        Conflict = 409
+        Service_Unavailable = 503
+        Internal_Server_Error = 500
+    """
+
+    def __init__(self, message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value):
+        Exception.__init__(self, message)
+        self.http_code = http_code
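+
+# Illustrative usage (hedged sketch):
+#   raise SdnConnectorError("Failed while authenticating", http_code=401)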
+
+
+class SdnConnectorBase(object):
+    """Abstract base class for all the SDN connectors
+
+    Arguments:
+        wim (dict): WIM record, as stored in the database
+        wim_account (dict): WIM account record, as stored in the database
+        config (dict, optional): plugin-specific configuration (see ``__init__``)
+    The arguments of the constructor are converted to object attributes.
+    An extra property, ``service_endpoint_mapping`` is created from ``config``.
+    """
+
+    def __init__(self, wim, wim_account, config=None, logger=None):
+        """
+        :param wim: (dict). Contains among others 'wim_url'
+        :param wim_account: (dict). Contains among others 'uuid' (internal id), 'name',
+            'sdn' (True if is intended for SDN-assist or False if intended for WIM), 'user', 'password'.
+        :param config: (dict or None): Particular information of plugin. These keys if present have a common meaning:
+            'mapping_not_needed': (bool) False by default or if missing, indicates that mapping is not needed.
+            'service_endpoint_mapping': (list) provides the internal endpoint mapping. The meaning is:
+                KEY                     meaning for WIM             meaning for SDN assist
+                --------------------    ------------------------    ----------------------
+                device_id               pop_switch_dpid             compute_id
+                device_interface_id     pop_switch_port             compute_pci_address
+                service_endpoint_id     wan_service_endpoint_id     SDN_service_endpoint_id
+                service_mapping_info    wan_service_mapping_info    SDN_service_mapping_info
+                    contains extra information if needed. Text in YAML format
+                switch_dpid             wan_switch_dpid             SDN_switch_dpid
+                switch_port             wan_switch_port             SDN_switch_port
+                datacenter_id           vim_account                 vim_account
+            id: (internal, do not use)
+            wim_id: (internal, do not use)
+        :param logger (logging.Logger): optional logger object. If none is passed, 'ro.sdn' is used.
+        """
+        self.logger = logger or logging.getLogger("ro.sdn")
+        self.wim = wim
+        self.wim_account = wim_account
+        self.config = config or {}
+        self.service_endpoint_mapping = self.config.get("service_endpoint_mapping", [])
+
+    def check_credentials(self):
+        """Check if the connector itself can access the SDN/WIM with the provided url (wim.wim_url),
+            user (wim_account.user), and password (wim_account.password)
+
+        Raises:
+            SdnConnectorError: Issues regarding authorization, access to
+                external URLs, etc are detected.
+        """
+        raise NotImplementedError
+
+    def get_connectivity_service_status(self, service_uuid, conn_info=None):
+        """Monitor the status of the connectivity service established
+
+        Arguments:
+            service_uuid (str): UUID of the connectivity service
+            conn_info (dict or None): Information returned by the connector
+                during the service creation/edition and subsequently stored in
+                the database.
+
+        Returns:
+            dict: JSON/YAML-serializable dict that contains a mandatory key
+                ``sdn_status`` associated with one of the following values::
+
+                    {'sdn_status': 'ACTIVE'}
+                        # The service is up and running.
+
+                    {'sdn_status': 'INACTIVE'}
+                        # The service was created, but the connector
+                        # cannot determine yet if connectivity exists
+                        # (ideally, the caller needs to wait and check again).
+
+                    {'sdn_status': 'DOWN'}
+                        # Connection was previously established,
+                        # but an error/failure was detected.
+
+                    {'sdn_status': 'ERROR'}
+                        # An error occurred when trying to create the service/
+                        # establish the connectivity.
+
+                    {'sdn_status': 'BUILD'}
+                        # Still trying to create the service, the caller
+                        # needs to wait and check again.
+
+                Additionally, ``error_msg`` (**str**) and ``sdn_info`` (**dict**)
+                keys can be used to provide additional status explanation or
+                new information available for the connectivity service.
+        """
+        raise NotImplementedError
+
+    def create_connectivity_service(self, service_type, connection_points, **kwargs):
+        """
+        Establish SDN/WAN connectivity between the endpoints
+        :param service_type: (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), ``L3``.
+        :param connection_points:  (list): each point corresponds to
+            an entry point to be connected. For WIM: from the DC to the transport network.
+            For SDN: Compute/PCI to the transport network. One
+            connection point serves to identify the specific access and
+            some other service parameters, such as encapsulation type.
+            Each item of the list is a dict with:
+                "service_endpoint_id": (str)(uuid)  Same meaning that for 'service_endpoint_mapping' (see __init__)
+                    In case the config attribute mapping_not_needed is True, this value is not relevant. In this case
+                    it will contain the string "device_id:device_interface_id"
+                "service_endpoint_encapsulation_type": None, "dot1q", ...
+                "service_endpoint_encapsulation_info": (dict) with:
+                    "vlan": ..., (int, present if encapsulation is dot1q)
+                    "vni": ... (int, present if encapsulation is vxlan),
+                    "peers": [(ipv4_1), (ipv4_2)] (present if encapsulation is vxlan)
+                    "mac": ...
+                    "device_id": ..., same meaning that for 'service_endpoint_mapping' (see __init__)
+                    "device_interface_id": same meaning that for 'service_endpoint_mapping' (see __init__)
+                    "switch_dpid": ..., present if mapping has been found for this device_id,device_interface_id
+                    "swith_port": ... present if mapping has been found for this device_id,device_interface_id
+                    "service_mapping_info": present if mapping has been found for this device_id,device_interface_id
+        :param kwargs: For future versions:
+            bandwidth (int): value in kilobytes
+            latency (int): value in milliseconds
+            Other QoS might be passed as keyword arguments.
+        :return: tuple: ``(service_uuid, conn_info)`` containing:
+            - *service_uuid* (str): UUID of the established connectivity service
+            - *conn_info* (dict or None): Information to be stored at the database (or ``None``).
+                This information will be provided to the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
+                **MUST** be JSON/YAML-serializable (plain data structures).
+        :raises: SdnConnectorException: In case of error. Nothing should be created in this case.
+            Provide the parameter http_code
+        """
+        raise NotImplementedError
+
+    def delete_connectivity_service(self, service_uuid, conn_info=None):
+        """
+        Disconnect multi-site endpoints previously connected
+
+        :param service_uuid: The one returned by create_connectivity_service
+        :param conn_info: The one returned by last call to 'create_connectivity_service' or 'edit_connectivity_service'
+            if they do not return None
+        :return: None
+        :raises: SdnConnectorException: In case of error. The parameter http_code must be filled
+        """
+        raise NotImplementedError
+
+    def edit_connectivity_service(
+        self, service_uuid, conn_info=None, connection_points=None, **kwargs
+    ):
+        """Change an existing connectivity service.
+
+        This method's arguments and return value follow the same convention as
+        :meth:`~.create_connectivity_service`.
+
+        :param service_uuid: UUID of the connectivity service.
+        :param conn_info: (dict or None): Information previously returned by last call to create_connectivity_service
+            or edit_connectivity_service
+        :param connection_points: (list): If provided, the old list of connection points will be replaced.
+        :param kwargs: Same meaning as in create_connectivity_service
+        :return: dict or None: Information to be updated and stored at the database.
+                When ``None`` is returned, no information should be changed.
+                When an empty dict is returned, the database record will be deleted.
+                **MUST** be JSON/YAML-serializable (plain data structures).
+        Raises:
+            SdnConnectorException: In case of error.
+        """
+
+    def clear_all_connectivity_services(self):
+        """Delete all WAN Links in a WIM.
+
+        This method is intended for debugging only, and should delete all the
+        connections controlled by the WIM/SDN, not only the connections that
+        a specific RO is aware of.
+
+        Raises:
+            SdnConnectorException: In case of error.
+        """
+        raise NotImplementedError
+
+    def get_all_active_connectivity_services(self):
+        """Provide information about all active connections provisioned by a
+        WIM.
+
+        Raises:
+            SdnConnectorException: In case of error.
+        """
+        raise NotImplementedError
diff --git a/src/device/service/drivers/microwave/Tools.py b/src/device/service/drivers/microwave/Tools.py
index 711fb55fd4bd9e1bcb16e851aa73f3a61f4bf4bd..4490c0f63fe6a517e5f31a5acd62208013bbaad0 100644
--- a/src/device/service/drivers/microwave/Tools.py
+++ b/src/device/service/drivers/microwave/Tools.py
@@ -14,7 +14,7 @@
 
 import json, logging, requests
 from requests.auth import HTTPBasicAuth
-from typing import Optional, Set
+from typing import Dict, Optional, Set
 from device.service.driver_api._Driver import RESOURCE_ENDPOINTS
 
 LOGGER = logging.getLogger(__name__)
@@ -43,6 +43,14 @@ def is_exportable_endpoint(node, termination_point_id, links):
             return False
     return True
 
+VLAN_CLASSIFICATION_TYPES = {'ietf-eth-tran-types:vlan-classification', 'vlan-classification'}
+OUTER_TAG_C_TYPE = {'ietf-eth-tran-types:classify-c-vlan', 'classify-c-vlan'}
+def get_vlan_outer_tag(endpoint : Dict) -> Optional[int]:
+    if endpoint.get('service-classification-type', '') not in VLAN_CLASSIFICATION_TYPES: return None
+    outer_tag = endpoint.get('outer-tag', {})
+    if outer_tag.get('tag-type', '') not in OUTER_TAG_C_TYPE: return None
+    return outer_tag.get('vlan-value')
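+
+# Illustrative input for get_vlan_outer_tag() (assumed shape, matching the keys
+# read above); this endpoint would yield 400, anything else yields None:
+#   {'service-classification-type': 'ietf-eth-tran-types:vlan-classification',
+#    'outer-tag': {'tag-type': 'ietf-eth-tran-types:classify-c-vlan', 'vlan-value': 400}}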
+
 def config_getter(
     root_url : str, resource_key : str, auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None,
     node_ids : Set[str] = set()
@@ -92,7 +100,35 @@ def config_getter(
             for service in service_instances:
                 service_name = service['etht-svc-name']
                 resource_key = '/services/service[{:s}]'.format(service_name)
-                result.append((resource_key, service))
+                resource_value = {'uuid': service.get('etht-svc-name', '<UNDEFINED>')}
+
+                for endpoint in service.get('etht-svc-end-points', []):
+                    _vlan_id = get_vlan_outer_tag(endpoint)
+                    if _vlan_id is not None:
+                        vlan_id = resource_value.get('vlan_id')
+                        if vlan_id is None:
+                            resource_value['vlan_id'] = _vlan_id
+                        elif vlan_id != _vlan_id:
+                            raise Exception('Incompatible VLAN IDs: {:s}'.format(str(service)))
+                    access_points = endpoint.get('etht-svc-access-points', [])
+                    for access_point in access_points:
+                        if access_point['access-point-id'] == '1':
+                            resource_value['node_id_src'] = access_point['access-node-id']
+                            resource_value['tp_id_src']   = access_point['access-ltp-id']
+                        elif access_point['access-point-id'] == '2':
+                            resource_value['node_id_dst'] = access_point['access-node-id']
+                            resource_value['tp_id_dst']   = access_point['access-ltp-id']
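+                # At this point resource_value resembles (illustrative values):
+                #   {'uuid': 'svc-1', 'vlan_id': 400, 'node_id_src': 'N1',
+                #    'tp_id_src': 'TP-1', 'node_id_dst': 'N2', 'tp_id_dst': 'TP-2'}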
+
+                if len(node_ids) > 0:
+                    node_id_src = resource_value.get('node_id_src')
+                    if node_id_src is None: continue
+                    if node_id_src not in node_ids: continue
+
+                    node_id_dst = resource_value.get('node_id_dst')
+                    if node_id_dst is None: continue
+                    if node_id_dst not in node_ids: continue
+
+                result.append((resource_key, resource_value))
         except requests.exceptions.Timeout:
             LOGGER.exception('Timeout connecting {:s}'.format(url))
         except Exception as e:  # pylint: disable=broad-except
diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py
index ac03527529b603089c4f8233cb185f6427e0c360..2399b9ac01258a21a4da6a9aa0e5bc09ea851951 100644
--- a/src/device/service/drivers/openconfig/OpenConfigDriver.py
+++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py
@@ -21,7 +21,7 @@ from apscheduler.job import Job
 from apscheduler.jobstores.memory import MemoryJobStore
 from apscheduler.schedulers.background import BackgroundScheduler
 from ncclient.manager import Manager, connect_ssh
-from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
 from common.tools.client.RetryDecorator import delay_exponential
 from common.type_checkers.Checkers import chk_length, chk_string, chk_type, chk_float
 from device.service.driver_api.Exceptions import UnsupportedResourceKeyException
@@ -113,9 +113,11 @@ class NetconfSessionHandler:
                 config, target=target, default_operation=default_operation, test_option=test_option,
                 error_option=error_option, format=format)
 
+    @RETRY_DECORATOR
     def locked(self, target):
         return self.__manager.locked(target=target)
 
+    @RETRY_DECORATOR
     def commit(self, confirmed=False, timeout=None, persist=None, persist_id=None):
         return self.__manager.commit(confirmed=confirmed, timeout=timeout, persist=persist, persist_id=persist_id)
 
@@ -233,23 +235,7 @@ def edit_config(
             results = [e for _ in resources] # if commit fails, set exception in each resource
     return results
 
-HISTOGRAM_BUCKETS = (
-    # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
-    0.0001, 0.00025, 0.00050, 0.00075,
-    0.0010, 0.0025, 0.0050, 0.0075,
-    0.0100, 0.0250, 0.0500, 0.0750,
-    0.1000, 0.2500, 0.5000, 0.7500,
-    1.0000, 2.5000, 5.0000, 7.5000,
-    10.0, 25.0, 50.0, 75.0,
-    100.0, INF
-)
 METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'openconfig'})
-METRICS_POOL.get_or_create('GetInitialConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('GetConfig',        MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SetConfig',        MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteConfig',     MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SubscribeState',   MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('UnsubscribeState', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
 
 class OpenConfigDriver(_Driver):
     def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called
diff --git a/src/device/service/drivers/openconfig/templates/EndPoints.py b/src/device/service/drivers/openconfig/templates/EndPoints.py
index 02fda8f0e195c267fddb1109f184c8a06e4a6787..f16f0ffcd09a07f6c109328b1c5f0ee101af545a 100644
--- a/src/device/service/drivers/openconfig/templates/EndPoints.py
+++ b/src/device/service/drivers/openconfig/templates/EndPoints.py
@@ -55,5 +55,5 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
         add_value_from_collection(endpoint, 'sample_types', sample_types)
 
         if len(endpoint) == 0: continue
-        response.append(('/endpoint[{:s}]'.format(endpoint['uuid']), endpoint))
+        response.append(('/endpoints/endpoint[{:s}]'.format(endpoint['uuid']), endpoint))
     return response
diff --git a/src/device/service/drivers/transport_api/Tools.py b/src/device/service/drivers/transport_api/Tools.py
index 4943648dcab21159b213f9ee938987995be61b0e..bbd4247f0debdd17885c5aadccafc32607e4cbe5 100644
--- a/src/device/service/drivers/transport_api/Tools.py
+++ b/src/device/service/drivers/transport_api/Tools.py
@@ -15,7 +15,7 @@
 import json, logging, operator, requests
 from requests.auth import HTTPBasicAuth
 from typing import Optional
-from device.service.driver_api._Driver import RESOURCE_ENDPOINTS
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_SERVICES
 
 LOGGER = logging.getLogger(__name__)
 
@@ -52,27 +52,74 @@ def config_getter(
         result.append((resource_key, e))
         return result
 
-    if resource_key != RESOURCE_ENDPOINTS: return result
-
-    if 'tapi-common:context' in context:
-        context = context['tapi-common:context']
-    elif 'context' in context:
-        context = context['context']
-
-    for sip in context['service-interface-point']:
-        layer_protocol_name = sip.get('layer-protocol-name', '?')
-        supportable_spectrum = sip.get('tapi-photonic-media:media-channel-service-interface-point-spec', {})
-        supportable_spectrum = supportable_spectrum.get('mc-pool', {})
-        supportable_spectrum = supportable_spectrum.get('supportable-spectrum', [])
-        supportable_spectrum = supportable_spectrum[0] if len(supportable_spectrum) == 1 else {}
-        grid_type = supportable_spectrum.get('frequency-constraint', {}).get('grid-type')
-        granularity = supportable_spectrum.get('frequency-constraint', {}).get('adjustment-granularity')
-        direction = sip.get('direction', '?')
-        endpoint_type = [layer_protocol_name, grid_type, granularity, direction]
-        str_endpoint_type = ':'.join(filter(lambda i: operator.is_not(i, None), endpoint_type))
-        endpoint_url = '/endpoints/endpoint[{:s}]'.format(sip['uuid'])
-        endpoint_data = {'uuid': sip['uuid'], 'type': str_endpoint_type}
-        result.append((endpoint_url, endpoint_data))
+    if resource_key == RESOURCE_ENDPOINTS:
+        if 'tapi-common:context' in context:
+            context = context['tapi-common:context']
+        elif 'context' in context:
+            context = context['context']
+
+        for sip in context['service-interface-point']:
+            layer_protocol_name = sip.get('layer-protocol-name', '?')
+            supportable_spectrum = sip.get('tapi-photonic-media:media-channel-service-interface-point-spec', {})
+            supportable_spectrum = supportable_spectrum.get('mc-pool', {})
+            supportable_spectrum = supportable_spectrum.get('supportable-spectrum', [])
+            supportable_spectrum = supportable_spectrum[0] if len(supportable_spectrum) == 1 else {}
+            grid_type = supportable_spectrum.get('frequency-constraint', {}).get('grid-type')
+            granularity = supportable_spectrum.get('frequency-constraint', {}).get('adjustment-granularity')
+            direction = sip.get('direction', '?')
+
+            endpoint_type = [layer_protocol_name, grid_type, granularity, direction]
+            str_endpoint_type = ':'.join(filter(lambda i: operator.is_not(i, None), endpoint_type))
+            sip_uuid = sip['uuid']
+
+            sip_names = sip.get('name', [])
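+            # Prefer the SIP's 'local-name' entry as its name; fall back to the uuid.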
+            sip_name = next(iter([
+                sip_name['value']
+                for sip_name in sip_names
+                if sip_name['value-name'] == 'local-name'
+            ]), sip_uuid)
+
+            endpoint_url = '/endpoints/endpoint[{:s}]'.format(sip_uuid)
+            endpoint_data = {'uuid': sip_uuid, 'name': sip_name, 'type': str_endpoint_type}
+            result.append((endpoint_url, endpoint_data))
+
+    elif resource_key == RESOURCE_SERVICES:
+        if 'tapi-common:context' in context:
+            context = context['tapi-common:context']
+        elif 'context' in context:
+            context = context['context']
+
+        if 'tapi-connectivity:connectivity-context' in context:
+            context = context['tapi-connectivity:connectivity-context']
+        elif 'connectivity-context' in context:
+            context = context['connectivity-context']
+
+        for conn_svc in context['connectivity-service']:
+            service_uuid = conn_svc['uuid']
+            constraints = conn_svc.get('connectivity-constraint', {})
+            total_req_cap = constraints.get('requested-capacity', {}).get('total-size', {})
+
+            service_url = '/services/service[{:s}]'.format(service_uuid)
+            service_data = {
+                'uuid': service_uuid,
+                'direction': constraints.get('connectivity-direction', 'UNIDIRECTIONAL'),
+                'capacity_unit': total_req_cap.get('unit', '<UNDEFINED>'),
+                'capacity_value': total_req_cap.get('value', '<UNDEFINED>'),
+            }
+
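+            # This parser takes the first end-point as the input SIP and any
+            # subsequent one as the output SIP.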
+            for i, endpoint in enumerate(conn_svc.get('end-point', [])):
+                layer_protocol_name = endpoint.get('layer-protocol-name')
+                if layer_protocol_name is not None:
+                    service_data['layer_protocol_name'] = layer_protocol_name
+
+                layer_protocol_qualifier = endpoint.get('layer-protocol-qualifier')
+                if layer_protocol_qualifier is not None:
+                    service_data['layer_protocol_qualifier'] = layer_protocol_qualifier
+
+                sip = endpoint['service-interface-point']['service-interface-point-uuid']
+                service_data['input_sip' if i == 0 else 'output_sip'] = sip
+
+            result.append((service_url, service_data))
 
     return result
 
diff --git a/src/device/service/drivers/transport_api/TransportApiDriver.py b/src/device/service/drivers/transport_api/TransportApiDriver.py
index 8b84274e075e10af04924cefa03768d1c340fb52..1991a34d0d797c48b6c2296435c0ebd0f3a8125a 100644
--- a/src/device/service/drivers/transport_api/TransportApiDriver.py
+++ b/src/device/service/drivers/transport_api/TransportApiDriver.py
@@ -85,9 +85,9 @@ class TransportApiDriver(_Driver):
             for resource in resources:
                 LOGGER.info('resource = {:s}'.format(str(resource)))
 
-                input_sip = find_key(resource, 'input_sip')
-                output_sip = find_key(resource, 'output_sip')
                 uuid = find_key(resource, 'uuid')
+                input_sip = find_key(resource, 'input_sip_uuid')
+                output_sip = find_key(resource, 'output_sip_uuid')
                 capacity_value = find_key(resource, 'capacity_value')
                 capacity_unit = find_key(resource, 'capacity_unit')
                 layer_protocol_name = find_key(resource, 'layer_protocol_name')
diff --git a/src/device/service/drivers/transport_api/__init__.py b/src/device/service/drivers/transport_api/__init__.py
index 2d3f6df3276f063cd9b414f47bba41b656682049..d5073c330b89bed63f08b0da86c4a7649c87b3dd 100644
--- a/src/device/service/drivers/transport_api/__init__.py
+++ b/src/device/service/drivers/transport_api/__init__.py
@@ -12,16 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_SERVICES
 
 ALL_RESOURCE_KEYS = [
     RESOURCE_ENDPOINTS,
-    RESOURCE_INTERFACES,
-    RESOURCE_NETWORK_INSTANCES,
+    RESOURCE_SERVICES,
 ]
-
-RESOURCE_KEY_MAPPINGS = {
-    RESOURCE_ENDPOINTS        : 'component',
-    RESOURCE_INTERFACES       : 'interface',
-    RESOURCE_NETWORK_INSTANCES: 'network_instance',
-}
diff --git a/src/device/service/drivers/xr/README_XR.md b/src/device/service/drivers/xr/README_XR.md
index fa1bc944035d27769cd9c16e0c29318e554e9489..9c64cdef1b773e84153c0d27a58e71af8bdf238f 100644
--- a/src/device/service/drivers/xr/README_XR.md
+++ b/src/device/service/drivers/xr/README_XR.md
@@ -107,7 +107,7 @@ This will make imports to work properly in all cases.
 Run the deploy script to build the Docker containers and instantiate them in the configured K8s cluster. The deploy script must be sourced for this to work!
 
 ```bash
-./deploy.sh
+./deploy/all.sh
 ```
 
 If protobuf definitions have changed, regenerate version controlled Java files manually
diff --git a/src/device/service/drivers/xr/XrDriver.py b/src/device/service/drivers/xr/XrDriver.py
index 605f4ce8d0f9c875a4b1736ff0aaa02fcb468778..c1471a8136b0e5cd7791e019bb0bdafd2252f591 100644
--- a/src/device/service/drivers/xr/XrDriver.py
+++ b/src/device/service/drivers/xr/XrDriver.py
@@ -16,10 +16,13 @@
 import logging
 import threading
 import json
-from typing import Any, Iterator, List, Optional, Tuple, Union
+from typing import Any, Iterator, List, Optional, Set, Tuple, Union
 import urllib3
+from common.DeviceTypes import DeviceTypeEnum
 from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum
 from common.type_checkers.Checkers import chk_type
+from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum, get_import_topology
 from device.service.driver_api._Driver import _Driver
 from .cm.cm_connection import CmConnection, ConsistencyMode
 from .cm import tf
@@ -45,7 +48,15 @@ class XrDriver(_Driver):
         tls_verify = False # Currently using self signed certificates
         username = settings.get("username", "xr-user-1")
         password = settings.get("password", "xr-user-1")
-        
+
+        # Options are:
+        #    disabled --> import endpoints only, as usual
+        #    devices  --> import sub-devices, but not the links connecting them
+        #                 (a remotely-controlled transport domain might exist between them)
+        #    topology --> import sub-devices and the links connecting them
+        #                 (not supported by the XR driver)
+        self.__import_topology = get_import_topology(settings, default=ImportTopologyEnum.DISABLED)
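+        # e.g. settings={'import_topology': 'devices'} would select ImportTopologyEnum.DEVICES
+        # (assumption: get_import_topology() reads the 'import_topology' settings key).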
+
         # Options are:
         #    asynchronous --> operation considered complete when IPM responds with suitable status code,
         #                     including "accepted", that only means request is semantically good and queued.
@@ -77,6 +88,7 @@ class XrDriver(_Driver):
     def Disconnect(self) -> bool:
         LOGGER.info(f"Disconnect[{self}]")
         with self.__lock:
+            self.__cm_connection.stop_monitoring_errors()
             self.__terminate.set()
             return True
 
@@ -98,7 +110,56 @@ class XrDriver(_Driver):
             constellation = self.__cm_connection.get_constellation_by_hub_name(self.__hub_module_name)
             if constellation:
                 self.__constellation = constellation
-                return [(f"/endpoints/endpoint[{ifname}]", {'uuid': ifname, 'type': 'optical', 'sample_types': {}}) for ifname in constellation.ifnames()]
+                if self.__import_topology == ImportTopologyEnum.DISABLED:
+                    return [
+                        (f"/endpoints/endpoint[{ifname}]", {'uuid': ifname, 'type': 'optical', 'sample_types': {}})
+                        for ifname in constellation.ifnames()
+                    ]
+                elif self.__import_topology == ImportTopologyEnum.DEVICES:
+                    devices : Set[str] = set()
+                    pluggables : Set[str] = set()
+                    devices_and_endpoints = []
+                    for ifname in constellation.ifnames():
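+                        # ifnames have the form '<device>|<pluggable>', e.g. 'XR HUB 1|XR-T1'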
+                        device_name, pluggable_name = ifname.split('|')
+
+                        if device_name not in devices:
+                            device_url = '/devices/device[{:s}]'.format(device_name)
+                            device_data = {
+                                'uuid': device_name, 'name': device_name,
+                                'type': DeviceTypeEnum.EMULATED_PACKET_ROUTER.value,
+                                'status': DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED,
+                                'drivers': [DeviceDriverEnum.DEVICEDRIVER_UNDEFINED],
+                            }
+                            devices_and_endpoints.append((device_url, device_data))
+
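+                            # Give each emulated device four internal copper endpoints,
+                            # named '1/1'..'1/4'.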
+                            for copper_if_index in range(4):
+                                copper_ifname = '1/{:d}'.format(copper_if_index + 1)
+                                endpoint_url = '/endpoints/endpoint[{:s}]'.format(copper_ifname)
+                                endpoint_data = {
+                                    'device_uuid': device_name, 'uuid': copper_ifname, 'name': copper_ifname,
+                                    'type': 'copper/internal', 'sample_types': {}
+                                }
+                                devices_and_endpoints.append((endpoint_url, endpoint_data))
+
+                            devices.add(device_name)
+
+                        if ifname not in pluggables:
+                            endpoint_url = '/endpoints/endpoint[{:s}]'.format(ifname)
+                            if 'hub' in ifname.lower():
+                                endpoint_type = 'optical/xr-hub'
+                            elif 'leaf' in ifname.lower():
+                                endpoint_type = 'optical/xr-leaf'
+                            else:
+                                endpoint_type = 'optical/xr'
+                            endpoint_data = {
+                                'device_uuid': device_name, 'uuid': pluggable_name, 'name': pluggable_name,
+                                'type': endpoint_type, 'sample_types': {}
+                            }
+                            devices_and_endpoints.append((endpoint_url, endpoint_data))
+
+                    return devices_and_endpoints
+                else:
+                    raise Exception('Unsupported import_topology mode: {:s}'.format(str(self.__import_topology)))
             else:
                 return []
 
diff --git a/src/device/service/drivers/xr/cm-cli.py b/src/device/service/drivers/xr/cm-cli.py
index 924ca0c966bbefd8b72c655ea788bdfd0ed08c5d..9aefe969c0549819568882a6215ae3dd86b7df3b 100755
--- a/src/device/service/drivers/xr/cm-cli.py
+++ b/src/device/service/drivers/xr/cm-cli.py
@@ -16,16 +16,22 @@
 
 # Test program for CmConnection
 import argparse
+import signal
 import logging
 import traceback
+import threading
 from typing import Tuple
-from cm.cm_connection import CmConnection, ConsistencyMode
+from cm.cm_connection import CmConnection, ConsistencyMode, ErrorFromIpm
 from cm.tf_service import TFService
 from cm.transport_capacity import TransportCapacity
 from cm.connection import Connection
 import cm.tf as tf
 
-logging.basicConfig(level=logging.INFO)
+logging.basicConfig(level=logging.WARNING)
 
 parser = argparse.ArgumentParser(description='CM Connection Test Utility')
 parser.add_argument('ip', help='CM IP address or domain name')
@@ -33,6 +39,7 @@ parser.add_argument('port', help='CM port', type=int)
 parser.add_argument('username', help='Username')
 parser.add_argument('password', help='Password')
 
+parser.add_argument('--monitor-errors', action='store_true')
 parser.add_argument('--list-constellations', action='store_true')
 parser.add_argument('--show-constellation-by-hub-name', nargs='?', type=str)
 parser.add_argument('--create-connection', nargs='?', type=str, help="uuid;ifname;ifname;capacity")
@@ -84,98 +91,118 @@ else:
     retry_interval = 0.2
 
 cm = CmConnection(args.ip, args.port, args.username, args.password, timeout=args.timeout, tls_verify=False, consistency_mode=consistency_mode, retry_interval=retry_interval)
-if not cm.Connect():
-    exit(-1)
-
-if args.list_constellations:
-    constellations = cm.list_constellations()
-    for constellation in constellations:
-        print("Constellation:", constellation.constellation_id)
-        for if_name in constellation.ifnames():
-            print(f"    {if_name}")
-
-if args.show_constellation_by_hub_name:
-    constellation = cm.get_constellation_by_hub_name(args.show_constellation_by_hub_name)
-    if constellation:
-        print(f"Constellation: {constellation.constellation_id},  traffic-mode: {constellation.traffic_mode}")
-        for if_name in constellation.ifnames():
-            print(f"    {if_name}")
-
-if args.create_connection:
-    tf_service = cli_create_string_to_tf_service(args.create_connection)
-    connection = Connection(from_tf_service=tf_service)
-    created_service = cm.create_connection(connection)
-    if created_service:
-        print(f"Created {created_service} for {connection}")
-    else:
-        print(f"Failed to create {connection}")
-
-if args.modify_connection:
-    href, tf_service = cli_modify_string_to_tf_service(args.modify_connection)
-    mc_args = args.modify_connection.split(";")
-    connection = Connection(from_tf_service=tf_service)
-    result = cm.update_connection(href, connection)
-    if result:
-        print(f"Updated {href} for {connection}")
-    else:
-        print(f"Failed to update {href} for {connection}")
-
-if args.show_connection_by_name:
-    connection = cm.get_connection_by_name(args.show_connection_by_name)
-    if connection:
-        print(str(connection))
-
-if args.list_connections:
-    connections = cm.get_connections()
-    for c in connections:
-        print(str(c))
-
-if args.delete_connection:
-    was_deleted = cm.delete_connection(args.delete_connection)
-    if was_deleted:
-        print(f"Successfully deleted {args.delete_connection}")
-    else:
-        print(f"Failed to delete {args.delete_connection}")
-
-if args.list_transport_capacities:
-    tcs = cm.get_transport_capacities()
-    for tc in tcs:
-        print(str(tc))
-
-if args.create_transport_capacity:
-    tf_service = cli_create_string_to_tf_service(args.create_transport_capacity)
-    tc = TransportCapacity(from_tf_service=tf_service)
-    created_service = cm.create_transport_capacity(tc)
-    if created_service:
-        print(f"Created {created_service} for {tc}")
-    else:
-        print(f"Failed to create {tc}")
-
-if args.emulate_tf_set_config_service:
-    eargs = args.emulate_tf_set_config_service.split(";")
-    if len(eargs) < 5:
-        print("Mandatory tokens missing for --emulate-tf-set-config-service")
-        exit(-1)
 
-    hub_module_name, uuid, input_sip, output_sip, capacity_value  = eargs[0:5]
-    capacity_value = int(capacity_value)
-    config = {
-        "input_sip": input_sip,
-        "output_sip": output_sip,
-        "capacity_value": capacity_value,
-        "capacity_unit": "gigabit"
-    }
+terminate = threading.Event()
+def signal_handler(sig, frame):
+    cm.stop_monitoring_errors()
+    terminate.set()
 
-    constellation = cm.get_constellation_by_hub_name(hub_module_name)
+signal.signal(signal.SIGINT, signal_handler)
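+# Ctrl-C (SIGINT) stops error monitoring and wakes the main thread from terminate.wait()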
 
-    # Allow testing some of the VTI code before we have CM that has VTI
-    if len(eargs) > 5 and eargs[5] == "FORCE-VTI-ON":
-        constellation.traffic_mode = "VTIMode"
-
-    if constellation is None:
-        print(f"Unable to find constellation for hub-module {hub_module_name}")
+try:
+    if not cm.Connect():
         exit(-1)
-    result = tf.set_config_for_service(cm, constellation, uuid, config)
-    print(f"Emulated SetConfig() for service result: {result}")
-    if isinstance(result, Exception):
-        traceback.print_exception(result)
+
+    if args.list_constellations:
+        constellations = cm.list_constellations()
+        for constellation in constellations:
+            print("Constellation:", constellation.constellation_id)
+            for if_name in constellation.ifnames():
+                print(f"    {if_name}")
+
+    if args.show_constellation_by_hub_name:
+        constellation = cm.get_constellation_by_hub_name(args.show_constellation_by_hub_name)
+        if constellation:
+            print(f"Constellation: {constellation.constellation_id},  traffic-mode: {constellation.traffic_mode}")
+            for if_name in constellation.ifnames():
+                print(f"    {if_name}")
+
+    if args.create_connection:
+        tf_service = cli_create_string_to_tf_service(args.create_connection)
+        connection = Connection(from_tf_service=tf_service)
+        try:
+            created_service = cm.create_connection(connection)
+            if created_service:
+                print(f"Created {created_service} for {connection}")
+            else:
+                print(f"Failed to create {connection}")
+        except ErrorFromIpm as ipm_err:
+            print(f"Failed to create {connection}: {str(ipm_err)}")
+
+    if args.modify_connection:
+        href, tf_service = cli_modify_string_to_tf_service(args.modify_connection)
+        connection = Connection(from_tf_service=tf_service)
+        result = cm.update_connection(href, connection)
+        if result:
+            print(f"Updated {href} for {connection}")
+        else:
+            print(f"Failed to update {href} for {connection}")
+
+    if args.show_connection_by_name:
+        connection = cm.get_connection_by_name(args.show_connection_by_name)
+        if connection:
+            print(str(connection))
+
+    if args.list_connections:
+        connections = cm.get_connections()
+        for c in connections:
+            print(str(c))
+
+    if args.delete_connection:
+        was_deleted = cm.delete_connection(args.delete_connection)
+        if was_deleted:
+            print(f"Successfully deleted {args.delete_connection}")
+        else:
+            print(f"Failed to delete {args.delete_connection}")
+
+    if args.list_transport_capacities:
+        tcs = cm.get_transport_capacities()
+        for tc in tcs:
+            print(str(tc))
+
+    if args.create_transport_capacity:
+        tf_service = cli_create_string_to_tf_service(args.create_transport_capacity)
+        tc = TransportCapacity(from_tf_service=tf_service)
+        created_service = cm.create_transport_capacity(tc)
+        if created_service:
+            print(f"Created {created_service} for {tc}")
+        else:
+            print(f"Failed to create {tc}")
+
+    if args.emulate_tf_set_config_service:
+        eargs = args.emulate_tf_set_config_service.split(";")
+        if len(eargs) < 5:
+            print("Mandatory tokens missing for --emulate-tf-set-config-service")
+            exit(-1)
+
+        hub_module_name, uuid, input_sip, output_sip, capacity_value = eargs[0:5]
+        capacity_value = int(capacity_value)
+        config = {
+            "input_sip_name": input_sip,
+            "output_sip_name": output_sip,
+            "capacity_value": capacity_value,
+            "capacity_unit": "gigabit"
+        }
+
+        constellation = cm.get_constellation_by_hub_name(hub_module_name)
+        if constellation is None:
+            print(f"Unable to find constellation for hub-module {hub_module_name}")
+            exit(-1)
+
+        # Allow testing some of the VTI code before we have a CM that supports VTI
+        if len(eargs) > 5 and eargs[5] == "FORCE-VTI-ON":
+            constellation.traffic_mode = "VTIMode"
+        result = tf.set_config_for_service(cm, constellation, uuid, config)
+        print(f"Emulated SetConfig() for service result: {result}")
+        if isinstance(result, Exception):
+            traceback.print_exception(result)
+
+    if args.monitor_errors:
+        cm.print_received_errors = True
+        terminate.wait()
+
+finally:
+    # Delete subscriptions; this ends the monitoring thread and ensures the
+    # program terminates normally.
+    cm.stop_monitoring_errors()
diff --git a/src/device/service/drivers/xr/cm/cm_connection.py b/src/device/service/drivers/xr/cm/cm_connection.py
index 7128494510f40914917d2c3981158b6dd3571c70..bcd62862de82f115c7c1ef7e98039e6398e62891 100644
--- a/src/device/service/drivers/xr/cm/cm_connection.py
+++ b/src/device/service/drivers/xr/cm/cm_connection.py
@@ -15,9 +15,13 @@
 
 from __future__ import annotations
 import collections.abc
+import threading
 import logging
 import json
 import time
+import asyncio
+import websockets
+import ssl
 from typing import Optional, List, Dict, Union
 import re
 import requests
@@ -51,6 +55,55 @@ class ExpiringValue:
 class UnexpectedEmptyBody(Exception):
     pass
 
+class ExternalError(Exception):
+    pass
+
+class ApiErrorFromIpm(Exception):
+    pass
+
+class ErrorFromIpm(ExternalError):
+    def __init__(self, err_dict):
+        msg = str(err_dict)
+        # Try to extract a short error message
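+        # Illustrative err_dict shape (an assumption based on the keys parsed here):
+        #   {"errors": {"errors": [{"messages": [{"lang": "en", "message": "..."}]}]}}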
+        try:
+            # Only look at first message
+            err_messages = err_dict["errors"]["errors"][0]["messages"]
+            for err_msg in err_messages:
+                if err_msg["lang"] == "en":
+                    msg = err_msg["message"]
+        except KeyError:
+            pass
+        except IndexError:
+            pass
+        super().__init__(msg)
+
+class CreateConsistencyError(Exception):
+    pass
+
+class ErrorStore:
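+    """Thread-safe store of asynchronous errors keyed by resource href.
+
+    Errors are recorded only while enabled; get_error() pops an entry, so each
+    error is delivered at most once."""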
+    def __init__(self):
+        self.__lock = threading.Lock()
+        self.__db = {}
+        self.__enabled = False
+
+    def get_error(self, uri: str) -> Optional[dict]:
+        with self.__lock:
+            return self.__db.pop(uri, None)
+
+    def set_error(self, uri: str, err_dict: dict):
+        with self.__lock:
+            if self.__enabled:
+                self.__db[uri] = err_dict
+
+    def enable(self):
+        with self.__lock:
+            self.__enabled = True
+
+    def disable(self):
+        with self.__lock:
+            self.__enabled = False
+            self.__db.clear()
+
 # This is an enum, not a regular class; see https://docs.python.org/3/library/enum.html
 # String-based enums require Python 3.11, so use number-based values and a custom parser
 class ConsistencyMode(Enum):
@@ -134,10 +187,25 @@ class HttpResult:
 
         return True
 
+    def raise_as_exception(self):
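+        # Precedence: wrap a transport-level exception first; otherwise surface an
+        # IPM-reported error message from the body; otherwise raise a generic ExternalError.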
+        if self.exception is not None:
+            raise ExternalError(f"Failure for request {str(self)}") from self.exception
+
+        status_code = self.status_code if self.status_code is not None else "<not executed>"
+
+        # Try to get error message from IPM
+        if self.json is not None and "errors" in self.json:
+            err_list = self.json["errors"]
+            if len(err_list) > 0 and "message" in err_list[0]:
+                err_msg = err_list[0]["message"]
+                raise ApiErrorFromIpm(f"{self.method} {self.url} {self.params},  status {status_code}, IPM reported error: {err_msg}")
+
+        raise ExternalError(str(self))
+
 class CmConnection:
     CONSISTENCY_WAIT_LOG_INTERVAL = 1.0
 
-    def __init__(self, address: str, port: int, username: str, password: str, timeout=30, tls_verify=True, consistency_mode: ConsistencyMode = ConsistencyMode.asynchronous, retry_interval: float=0.2, max_consistency_tries:int = 100_000) -> None:
+    def __init__(self, address: str, port: int, username: str, password: str, timeout=30, tls_verify=True, consistency_mode: ConsistencyMode = ConsistencyMode.asynchronous, retry_interval: float=0.2, max_consistency_tries:int = 100_000, monitor_error_stream: bool = True) -> None:
         self.__tls_verify = tls_verify
         if not tls_verify:
             urllib3.disable_warnings()
@@ -151,7 +219,18 @@ class CmConnection:
         self.__username = username
         self.__password = password
         self.__cm_root = 'https://' + address + ':' + str(port)
+        self.__cm_ws_root = 'wss://' + address + ':' + str(port)
         self.__access_token = None
+        self.__monitor_error_stream = monitor_error_stream
+        self.__err_store = ErrorStore()
+        self.__err_monitor_thread = None
+        self.__err_monitor_connected = threading.Event()
+        self.__err_monitor_sub_id = None
+        self.__err_monitor_terminate = threading.Event()
+        self.print_received_errors = False
+
+    def __del__(self):
+        self.stop_monitoring_errors()
 
     def __perform_request(self, http_result: HttpResult, permit_empty_body: bool,  fn, *args, **kwargs):
         try:
@@ -238,7 +317,121 @@ class CmConnection:
             self.__acquire_access_token()
 
     def Connect(self) -> bool:
-        return self.__acquire_access_token()
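+        # Acquire the token first; then start the asynchronous error monitor unless
+        # disabled (tests construct CmConnection with monitor_error_stream=False).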
+        if not self.__acquire_access_token():
+            return False
+        return self.monitor_errors() if self.__monitor_error_stream else True
+
+    def subscribe_errors(self):
+        sub = [
+            {
+                "subscriptionName": "TfXrDriverErrorMonitopr",
+                "subscriptionFilters": [
+                    {
+                        "requestedNotificationTypes": [ "Error" ],
+                        "requestedResources": [
+                            {
+                                "resourceType": "cm.network-connection",
+                            }
+                        ]
+                    },
+                ]
+            }
+        ]
+
+        r = self.__post("/api/v1/subscriptions/events", sub)
+        if not r.is_valid_json_list_with_status(201) or len(r.json) != 1:
+            return None, None
+        try:
+            return self.__cm_ws_root + r.json[0]["notificationChannel"]["streamAddress"], r.json[0]["subscriptionId"]
+        except KeyError:
+            return None, None
+
+    def unsubscribe(self, sub_id: str):
+        resp = self.__delete(f"/api/v1/subscriptions/events/{sub_id}")
+        if resp.is_valid_with_status_ignore_body(202):
+            LOGGER.info(f"Deleted subscription {sub_id=}")
+            return True
+        else:
+            LOGGER.info(f"Deleting subscription {sub_id=} failed, status {resp.status_code}")
+            return False
+
+    def monitor_errors(self) -> bool:
+        uri, sub_id = self.subscribe_errors()
+        if not uri or not sub_id:
+            return False
+        self.__err_monitor_sub_id = sub_id
+
+        def err_monitor_thread():
+            LOGGER.info(f"Listening errors via {uri}")
+
+            ctx = ssl.create_default_context()
+            if not self.__tls_verify:
+                ctx.check_hostname = False
+                ctx.verify_mode = ssl.CERT_NONE
+
+            async def receive_websock(uri, ssl_ctx):
+                while not self.__err_monitor_terminate.is_set():
+                    try:
+                        async with websockets.connect(uri, ssl=ssl_ctx) as websocket:
+                            LOGGER.info(f"err_monitor_thread(): WebSock connected to {uri}")
+                            self.__err_monitor_connected.set()
+                            while not self.__err_monitor_terminate.is_set():
+                                # The 5 s timeout forces periodic checking of the
+                                # terminate flag. On normal termination the timeout is
+                                # never hit: unsubscribing makes the server close the
+                                # connection. It exists purely as a backup in case the
+                                # unsubscribe fails.
+                                try:
+                                    msg = await asyncio.wait_for(websocket.recv(), timeout=5.0)
+                                except asyncio.exceptions.TimeoutError:
+                                    continue
+                                if self.print_received_errors:
+                                    print(f"RX: {msg}")
+                                try:
+                                    msg_json = json.loads(msg)
+                                    href = msg_json["href"]
+                                    LOGGER.debug(f"err_monitor_thread(): RX [{href}]: {msg}")
+                                    self.__err_store.set_error(href, msg_json)
+                                except json.JSONDecodeError as json_err:
+                                    LOGGER.error(f"err_monitor_thread(): Invalid message received: {msg}, JSON decode error {str(json_err)}")
+                                except KeyError:
+                                    LOGGER.error(f"err_monitor_thread(): Missing href in message: {msg}")
+                    except asyncio.CancelledError as e:
+                        LOGGER.debug("err_monitor_thread(): monitoring cancelled")
+                        raise e
+                    except Exception as e:
+                        if not self.__err_monitor_terminate.is_set():
+                            LOGGER.error(f"err_monitor_thread(): exception {str(e)}, reconnecting")
+                            time.sleep(1)
+
+            asyncio.run(receive_websock(uri, ctx))
+            LOGGER.debug("err_monitor_thread(): thread terminating")
+
+        assert self.__err_monitor_thread is None
+        self.__err_monitor_terminate.clear()
+        self.__err_monitor_thread = threading.Thread(target=err_monitor_thread)
+        self.__err_monitor_thread.start()
+        # If the connection comes up quickly, wait for it; otherwise proceed without
+        # delay. Not waiting may miss some errors (surfacing later as timeouts), while
+        # waiting too long makes for a bad user experience.
+        self.__err_monitor_connected.wait(0.5)
+
+        return True
+
+    def stop_monitoring_errors(self):
+        self.__err_monitor_terminate.set()
+
+        if self.__err_monitor_sub_id:
+            LOGGER.debug(f"Disabling error subscribtion {self.__err_monitor_sub_id }")
+            self.unsubscribe(self.__err_monitor_sub_id)
+            self.__err_monitor_sub_id = None
+
+        if self.__err_monitor_thread is not None:
+            LOGGER.debug("Terminating error monitoring thread")
+            self.__err_monitor_thread.join()
+            LOGGER.info("Error monitoring thread terminated")
+            self.__err_monitor_thread = None
 
     def list_constellations(self) -> List[Constellation]:
         r = self.__get("/api/v1/xr-networks?content=expanded")
@@ -246,7 +439,6 @@ class CmConnection:
             return []
         return [Constellation(c) for c in r.json]
 
-
     def get_constellation_by_hub_name(self, hub_module_name: str) -> Optional[Constellation]:
         qparams = [
             ('content', 'expanded'),
@@ -324,6 +516,11 @@ class CmConnection:
                         log_ts = ts
                         LOGGER.info(f"apply_create_consistency(): waiting for life cycle state progress for {get_result}, current: {str(get_result.life_cycle_info)}, ellapsed time {ts-ts_start} seconds")
             else:
+                err_info = self.__err_store.get_error(obj.href)
+                if err_info is not None:
+                    LOGGER.info(f"apply_create_consistency(): asynchronous error reported for {obj}: {str(err_info)}")
+                    raise ErrorFromIpm(err_info)
+
                 ts = time.perf_counter()
                 if ts - log_ts >= self.CONSISTENCY_WAIT_LOG_INTERVAL:
                     log_ts = ts
@@ -337,10 +534,13 @@ class CmConnection:
         duration = time.perf_counter() - ts_start
         if not valid:
             if get_result:
-                LOGGER.info(f"Failed to apply create consistency for {get_result}, insufficient life-cycle-state progress ({str(get_result.life_cycle_info)}), duration {duration} seconds")
+                msg = f"Failed to apply create consistency for {get_result}, insufficient life-cycle-state progress ({str(get_result.life_cycle_info)}), duration {duration} seconds"
+                LOGGER.info(msg)
+                raise CreateConsistencyError(msg)
             else:
-                LOGGER.info(f"Failed to apply create consistency for {obj}, REST object did not appear, duration {duration} seconds")
-            return None
+                msg = f"Failed to apply create consistency for {obj}, REST object did not appear, duration {duration} seconds"
+                LOGGER.info(msg)
+                raise CreateConsistencyError(msg)
         else:
             LOGGER.info(f"Applied create consistency for {get_result}, final life-cycle-state {str(get_result.life_cycle_info)}, duration {duration} seconds")
 
@@ -399,20 +599,24 @@ class CmConnection:
         # Create wants a list, so wrap connection to list
         cfg = [connection.create_config()]
 
-        resp = self.__post("/api/v1/network-connections", cfg)
-        if resp.is_valid_json_list_with_status(202, 1, 1) and "href" in resp.json[0]:
-            connection.href = resp.json[0]["href"]
-            LOGGER.info(f"IPM accepted create request for connection {connection}")
-            new_connection = self.apply_create_consistency(connection, lambda: self.get_connection_by_href(connection.href))
-            if new_connection:
-                LOGGER.info(f"Created connection {new_connection}")
-                return new_connection.href
+        self.__err_store.enable()
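+        # Errors are collected only while this create is in flight; if IPM reports an
+        # asynchronous error for this href, apply_create_consistency() raises ErrorFromIpm.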
+        try:
+            resp = self.__post("/api/v1/network-connections", cfg)
+            if resp.is_valid_json_list_with_status(202, 1, 1) and "href" in resp.json[0]:
+                connection.href = resp.json[0]["href"]
+                LOGGER.info(f"IPM accepted create request for connection {connection}")
+                new_connection = self.apply_create_consistency(connection, lambda: self.get_connection_by_href(connection.href))
+                if new_connection:
+                    LOGGER.info(f"Created connection {new_connection}")
+                    return new_connection.href
+                else:
+                    LOGGER.error(f"Consistency failure for connection {connection}, result {resp}")
+                    return None
             else:
-                LOGGER.error(f"Consistency failure for connection {connection}, result {resp}")
-                return None
-        else:
-            LOGGER.error(f"Create failure for connection {connection}, result {resp}")
-            return None
+                LOGGER.error(f"Create failure for connection {connection}, result {resp}")
+                resp.raise_as_exception()
+        finally:
+            self.__err_store.disable()
 
     def update_connection(self, href: str, connection: Connection, existing_connection: Optional[Connection]=None) -> Optional[str]:
         cfg = connection.create_config()
diff --git a/src/device/service/drivers/xr/cm/tests/test_cm_connection.py b/src/device/service/drivers/xr/cm/tests/test_cm_connection.py
index a7944ed220c6d68aad2f122a0bb0d2c1f83fdd06..22b74f36a60e3eda4a0d08d9791cae112b7fd605 100644
--- a/src/device/service/drivers/xr/cm/tests/test_cm_connection.py
+++ b/src/device/service/drivers/xr/cm/tests/test_cm_connection.py
@@ -37,30 +37,30 @@ def test_cmc_connect():
     # Valid access token
     with requests_mock.Mocker() as m:
         m.post('https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token', text=access_token)
-        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False)
+        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, monitor_error_stream=False)
         assert cm.Connect()
 
     # Valid JSON but no access token
     with requests_mock.Mocker() as m:
         m.post('https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token', text=r'{"a": "b"}')
-        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False)
+        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, monitor_error_stream=False)
         assert not cm.Connect()
 
     # Invalid JSON
     with requests_mock.Mocker() as m:
         m.post('https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token', text=r'}}}')
-        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False)
+        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, monitor_error_stream=False)
         assert not cm.Connect()
 
     with requests_mock.Mocker() as m:
         # No mock present for the destination
-        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False)
+        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, monitor_error_stream=False)
         assert not cm.Connect()
 
 def test_cmc_get_constellations():
     with mock_cm_connectivity() as m:
         m.get("https://127.0.0.1:9999/api/v1/xr-networks?content=expanded", text=res_constellations)
-        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False)
+        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, monitor_error_stream=False)
         assert cm.Connect()
 
         # List all constellations
diff --git a/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py b/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py
index e9b16b62034bcd42061907d920b757b59766f562..42785caad79f4ba6000877e81d3caf403c463c1a 100644
--- a/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py
+++ b/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py
@@ -20,7 +20,7 @@ import traceback
 import copy
 import requests_mock
 
-from ..cm_connection import CmConnection, ConsistencyMode
+from ..cm_connection import CmConnection, ConsistencyMode, CreateConsistencyError
 from ..tf import set_config_for_service
 
 access_token = r'{"access_token":"eyI3...","expires_in":3600,"refresh_expires_in":0,"refresh_token":"ey...","token_type":"Bearer","not-before-policy":0,"session_state":"f6e235c4-4ca4-4258-bede-4f2b7125adfb","scope":"profile email offline_access"}'
@@ -44,20 +44,23 @@ def mock_cm():
 
 uuid = "12345ABCDEFGHIJKLMN"
 config = {
-    "input_sip": "XR HUB 1|XR-T4;",
-    "output_sip": "XR LEAF 1|XR-T1",
+    "input_sip_name": "XR HUB 1|XR-T4;",
+    "output_sip_name": "XR LEAF 1|XR-T1",
     "capacity_value": 125,
     "capacity_unit": "gigabit"
 }
 
 def _validate_result(result, expect):
-    if isinstance(result, Exception):
-        traceback.print_exception(result)
-    assert result is expect # Not, "is", not ==, we want type checking in this case, as also an exception can be returned (as return value)
+    if isinstance(expect, Exception):
+        assert type(result) is type(expect) # exact type match expected
+    else:
+        if isinstance(result, Exception):
+            traceback.print_exception(result)
+        assert result is expect # Note: "is", not "==", since we want identity checking here; an exception can also be returned (as a value)
 
 def test_xr_set_config():
     with mock_cm() as m:
-        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False)
+        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, monitor_error_stream=False)
         assert cm.Connect()
 
         constellation = cm.get_constellation_by_hub_name("XR HUB 1")
@@ -86,7 +89,7 @@ def repeat_last_expected(expected: list[tuple], called: list[tuple]) -> list[tup
 
 def test_xr_set_config_consistency_lifecycle():
     with mock_cm() as m:
-        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, consistency_mode=ConsistencyMode.lifecycle, retry_interval=0, timeout=1, max_consistency_tries=3)
+        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, consistency_mode=ConsistencyMode.lifecycle, retry_interval=0, timeout=1, max_consistency_tries=3, monitor_error_stream=False)
         assert cm.Connect()
 
         constellation = cm.get_constellation_by_hub_name("XR HUB 1")
@@ -125,7 +128,7 @@ def test_xr_set_config_consistency_lifecycle():
                { 'json': json_non_terminal, 'status_code': 200 }])
 
         result = set_config_for_service(cm, constellation, uuid, config)
-        _validate_result(result, False) # Service creation failure due to insufficient progress
+        _validate_result(result, CreateConsistencyError("")) # Service creation failure due to insufficient progress
 
         called_mocks = [(r._request.method, r._request.url) for r in m._adapter.request_history]
         expected_mocks_no_connect = [
@@ -139,7 +142,7 @@ def test_xr_set_config_consistency_lifecycle():
         ################################################################################
         # Same as before, but CmConnection no longer requires lifecycle progress
         m.reset_mock()
-        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, consistency_mode=ConsistencyMode.synchronous, retry_interval=0, timeout=1, max_consistency_tries=3)
+        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, consistency_mode=ConsistencyMode.synchronous, retry_interval=0, timeout=1, max_consistency_tries=3, monitor_error_stream=False)
         assert cm.Connect()
         constellation = cm.get_constellation_by_hub_name("XR HUB 1")
         assert constellation
@@ -154,21 +157,21 @@ def test_xr_set_config_consistency_lifecycle():
         ################################################################################
         # Same as above, but without REST object appearing
         m.reset_mock()
-        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, consistency_mode=ConsistencyMode.synchronous, retry_interval=0, timeout=1, max_consistency_tries=3)
+        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, consistency_mode=ConsistencyMode.synchronous, retry_interval=0, timeout=1, max_consistency_tries=3, monitor_error_stream=False)
         assert cm.Connect()
         constellation = cm.get_constellation_by_hub_name("XR HUB 1")
         assert constellation
         m.get("https://127.0.0.1:9999/api/v1/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432",
               [{'text': '', 'status_code': 401}])
         result = set_config_for_service(cm, constellation, uuid, config)
-        _validate_result(result, False)
+        _validate_result(result, CreateConsistencyError(""))
         called_mocks = [(r._request.method, r._request.url) for r in m._adapter.request_history]
         assert called_mocks == repeat_last_expected(expected_mocks[:2] + expected_mocks_no_connect, called_mocks)
 
 
 def test_xr_set_config_update_case():
     with mock_cm() as m:
-        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False)
+        cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False, monitor_error_stream=False)
         assert cm.Connect()
 
         constellation = cm.get_constellation_by_hub_name("XR HUB 1")
diff --git a/src/device/service/drivers/xr/cm/tf.py b/src/device/service/drivers/xr/cm/tf.py
index c44cb0c9f3ce0e755ce375908a520374e639e40f..4b1352216d79aea46beca8c5383f64f39869f91b 100644
--- a/src/device/service/drivers/xr/cm/tf.py
+++ b/src/device/service/drivers/xr/cm/tf.py
@@ -15,7 +15,7 @@
 
 from typing import Dict, Union
 import logging
-from .cm_connection import CmConnection
+from .cm_connection import CmConnection, ExternalError
 from .constellation import Constellation
 from .tf_service import TFService
 from .transport_capacity import TransportCapacity
@@ -38,7 +38,7 @@ def _get_capacity(config) -> int:
 
 def set_config_for_service(cm_connection: CmConnection, constellation: Constellation, uuid: str, config: Dict[str, any]) -> Union[bool, Exception]:
     try:
-        service = TFService(uuid, config["input_sip"], config["output_sip"], _get_capacity(config))
+        service = TFService(uuid, config["input_sip_name"], config["output_sip_name"], _get_capacity(config))
         if constellation.is_vti_mode():
             desired_tc = TransportCapacity(from_tf_service=service)
             active_tc = cm_connection.get_transport_capacity_by_name(service.name())
@@ -57,13 +57,17 @@ def set_config_for_service(cm_connection: CmConnection, constellation: Constella
                         LOGGER.error(f"set_config_for_service: Failed to create Transport Capacity ({desired_tc=})")
                         return False
         connection = Connection(from_tf_service=service)
-        href = cm_connection.create_or_update_connection(connection)
-        if href:
-            LOGGER.info(f"set_config_for_service: Created service {uuid} as {href} (connection={str(connection)})")
-            return True
-        else:
-            LOGGER.error(f"set_config_for_service: Service creation failure for {uuid} (connection={str(connection)})")
-            return False
+        try:
+            href = cm_connection.create_or_update_connection(connection)
+            if href:
+                LOGGER.info(f"set_config_for_service: Created service {uuid} as {href} (connection={str(connection)})")
+                return True
+            else:
+                LOGGER.error(f"set_config_for_service: Service creation failure for {uuid} (connection={str(connection)})")
+                return False
+        except ExternalError as e:
+            LOGGER.error(f"set_config_for_service: Service creation failure for {uuid} (connection={str(connection)}): {str(e)}")
+            return e
     # Intentionally catching all exceptions, as they are stored in a list as return values
     # by the caller
     # pylint: disable=broad-except
diff --git a/src/load_generator/command/__main__.py b/src/load_generator/command/__main__.py
index 7504eb6da6d6adea698249240abf2c4e4559297a..a97f081a32269ff824733b9a2a69be21bfb2004f 100644
--- a/src/load_generator/command/__main__.py
+++ b/src/load_generator/command/__main__.py
@@ -36,6 +36,10 @@ def main():
         ],
         offered_load  = 50,
         holding_time  = 10,
+        availability_ranges   = [[0.0, 99.9999]],
+        capacity_gbps_ranges  = [[0.1, 100.00]],
+        e2e_latency_ms_ranges = [[5.0, 100.00]],
+        max_workers   = 10,
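+        # Availability, capacity and latency SLAs for each request are drawn from the
+        # [min, max] ranges above; max_workers caps the number of concurrent worker threads.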
         dry_mode      = False,           # in dry mode, no request is sent to TeraFlowSDN
         record_to_dlt = False,           # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT
         dlt_domain_id = 'dlt-perf-eval', # domain used to uploaded entities, ignored when record_to_dlt = False
diff --git a/src/load_generator/load_gen/Constants.py b/src/load_generator/load_gen/Constants.py
index 9ae3cdc1216891ca4dfcf01c1bd49d27bf4ef6f6..09cdecab124a776d3f71f66554db0934eaf1bb1c 100644
--- a/src/load_generator/load_gen/Constants.py
+++ b/src/load_generator/load_gen/Constants.py
@@ -27,4 +27,8 @@ ENDPOINT_COMPATIBILITY = {
     'PHOTONIC_MEDIA:DWDM:G_50GHZ:INPUT'  : 'PHOTONIC_MEDIA:DWDM:G_50GHZ:OUTPUT',
 }
 
-MAX_WORKER_THREADS = 10
\ No newline at end of file
+DEFAULT_AVAILABILITY_RANGES   = [[0.0, 99.9999]]
+DEFAULT_CAPACITY_GBPS_RANGES  = [[0.1, 100.00]]
+DEFAULT_E2E_LATENCY_MS_RANGES = [[5.0, 100.00]]
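+
+# Each *_RANGES value is a list of [min, max] ranges from which request SLA values
+# are drawn (see load_generator.tools.ListScalarRange.generate_value).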
+
+DEFAULT_MAX_WORKERS = 10
diff --git a/src/load_generator/load_gen/Parameters.py b/src/load_generator/load_gen/Parameters.py
index f0de3ea1aa268c520fd214f7f621953289ac5bc9..aca40cd3854fad203f15ce9b07a79715e9ea46f6 100644
--- a/src/load_generator/load_gen/Parameters.py
+++ b/src/load_generator/load_gen/Parameters.py
@@ -13,18 +13,30 @@
 # limitations under the License.
 
 from typing import List, Optional
+from load_generator.load_gen.Constants import (
+    DEFAULT_AVAILABILITY_RANGES, DEFAULT_CAPACITY_GBPS_RANGES, DEFAULT_E2E_LATENCY_MS_RANGES, DEFAULT_MAX_WORKERS)
+from load_generator.tools.ListScalarRange import Type_ListScalarRange
+
 
 class Parameters:
     def __init__(
         self, num_requests : int, request_types : List[str], offered_load : Optional[float] = None,
-        inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None, do_teardown : bool = True,
-        dry_mode : bool = False, record_to_dlt : bool = False, dlt_domain_id : Optional[str] = None
+        inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None,
+        availability_ranges : Type_ListScalarRange = DEFAULT_AVAILABILITY_RANGES,
+        capacity_gbps_ranges : Type_ListScalarRange = DEFAULT_CAPACITY_GBPS_RANGES,
+        e2e_latency_ms_ranges : Type_ListScalarRange = DEFAULT_E2E_LATENCY_MS_RANGES,
+        max_workers : int = DEFAULT_MAX_WORKERS, do_teardown : bool = True, dry_mode : bool = False,
+        record_to_dlt : bool = False, dlt_domain_id : Optional[str] = None
     ) -> None:
         self._num_requests = num_requests
         self._request_types = request_types
         self._offered_load = offered_load
         self._inter_arrival_time = inter_arrival_time
         self._holding_time = holding_time
+        self._availability_ranges = availability_ranges
+        self._capacity_gbps_ranges = capacity_gbps_ranges
+        self._e2e_latency_ms_ranges = e2e_latency_ms_ranges
+        self._max_workers = max_workers
         self._do_teardown = do_teardown
         self._dry_mode = dry_mode
         self._record_to_dlt = record_to_dlt
@@ -59,6 +71,18 @@ class Parameters:
     @property
     def holding_time(self): return self._holding_time
 
+    @property
+    def availability_ranges(self): return self._availability_ranges
+
+    @property
+    def capacity_gbps_ranges(self): return self._capacity_gbps_ranges
+
+    @property
+    def e2e_latency_ms_ranges(self): return self._e2e_latency_ms_ranges
+
+    @property
+    def max_workers(self): return self._max_workers
+
     @property
     def do_teardown(self): return self._do_teardown
 
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
index 5c56ea6ec603f4e9bb3fc72d5baa47f05ea0c991..3a52b3b322bfefe60e7c5c8d3eed585b92b40353 100644
--- a/src/load_generator/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, json, random, threading
+import logging, json, random, re, threading, uuid
 from typing import Dict, Optional, Set, Tuple
 from common.proto.context_pb2 import Empty, IsolationLevelEnum, TopologyId
 from common.tools.grpc.Tools import grpc_message_to_json
@@ -28,6 +28,7 @@ from common.tools.object_factory.Slice import json_slice
 from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from dlt.connector.client.DltConnectorClient import DltConnectorClient
+from load_generator.tools.ListScalarRange import generate_value
 from .Constants import ENDPOINT_COMPATIBILITY, RequestType
 from .DltTools import record_device_to_dlt, record_link_to_dlt
 from .Parameters import Parameters
@@ -53,6 +54,7 @@ class RequestGenerator:
         self._parameters = parameters
         self._lock = threading.Lock()
         self._num_generated = 0
+        self._num_released = 0
         self._available_device_endpoints : Dict[str, Set[str]] = dict()
         self._used_device_endpoints : Dict[str, Dict[str, str]] = dict()
         self._endpoint_ids_to_types : Dict[Tuple[str, str], str] = dict()
@@ -64,6 +66,9 @@ class RequestGenerator:
     @property
     def num_generated(self): return self._num_generated
 
+    @property
+    def num_released(self): return self._num_released
+
     @property
     def infinite_loop(self): return self._parameters.num_requests == 0
 
@@ -186,28 +191,23 @@ class RequestGenerator:
             self._used_device_endpoints.setdefault(device_uuid, dict()).pop(endpoint_uuid, None)
             self._available_device_endpoints.setdefault(device_uuid, set()).add(endpoint_uuid)
 
-    def compose_request(self) -> Tuple[bool, Optional[Dict]]: # completed, request
+    def compose_request(self) -> Tuple[bool, Optional[Dict], Optional[str]]: # completed, request, request_type
         with self._lock:
             if not self.infinite_loop and (self._num_generated >= self._parameters.num_requests):
                 LOGGER.info('Generation Done!')
-                return True, None # completed
-            self._num_generated += 1
-            num_request = self._num_generated
+                return True, None, None # completed
 
-        #request_uuid = str(uuid.uuid4())
-        request_uuid = 'svc_{:d}'.format(num_request)
-        
-        # choose request type
+        request_uuid = str(uuid.uuid4())
         request_type = random.choice(self._parameters.request_types)
 
         if request_type in {
             RequestType.SERVICE_L2NM, RequestType.SERVICE_L3NM, RequestType.SERVICE_TAPI, RequestType.SERVICE_MW
         }:
-            return False, self._compose_service(num_request, request_uuid, request_type)
+            return False, self._compose_service(request_uuid, request_type), request_type
         elif request_type in {RequestType.SLICE_L2NM, RequestType.SLICE_L3NM}:
-            return False, self._compose_slice(num_request, request_uuid, request_type)
+            return False, self._compose_slice(request_uuid, request_type), request_type
 
-    def _compose_service(self, num_request : int, request_uuid : str, request_type : str) -> Optional[Dict]:
+    def _compose_service(self, request_uuid : str, request_type : str) -> Optional[Dict]:
         # choose source endpoint
         src_endpoint_types = set(ENDPOINT_COMPATIBILITY.keys()) if request_type in {RequestType.SERVICE_TAPI} else None
         src = self._use_device_endpoint(request_uuid, request_type, endpoint_types=src_endpoint_types)
@@ -236,6 +236,10 @@ class RequestGenerator:
             self._release_device_endpoint(src_device_uuid, src_endpoint_uuid)
             return None
 
+        with self._lock:
+            self._num_generated += 1
+            num_request = self._num_generated
+
         # compose endpoints
         dst_device_uuid,dst_endpoint_uuid = dst
         endpoint_ids = [
@@ -244,9 +248,9 @@ class RequestGenerator:
         ]
 
         if request_type == RequestType.SERVICE_L2NM:
-            availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
-            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
-            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
+            availability   = generate_value(self._parameters.availability_ranges,   ndigits=5)
+            capacity_gbps  = generate_value(self._parameters.capacity_gbps_ranges,  ndigits=2)
+            e2e_latency_ms = generate_value(self._parameters.e2e_latency_ms_ranges, ndigits=2)
 
             constraints = [
                 json_constraint_sla_availability(1, True, availability),
@@ -261,12 +265,12 @@ class RequestGenerator:
             src_device_name = self._device_data[src_device_uuid]['name']
             src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
             src_router_id = ROUTER_ID.get(src_device_name)
-            src_router_num = int(src_device_name.replace('R', ''))
+            src_router_num = int(re.findall(r'^\D*(\d+)', src_device_name)[0])
             if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num)
 
             dst_device_name = self._device_data[dst_device_uuid]['name']
             dst_endpoint_name = self._device_endpoint_data[dst_device_uuid][dst_endpoint_uuid]['name']
-            dst_router_num = int(dst_device_name.replace('R', ''))
+            dst_router_num = int(re.findall(r'^\D*(\d+)', dst_device_name)[0])
             dst_router_id = ROUTER_ID.get(dst_device_name)
             if dst_router_id is None: dst_router_id = '10.0.0.{:d}'.format(dst_router_num)
 
@@ -293,9 +297,9 @@ class RequestGenerator:
                 request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
 
         elif request_type == RequestType.SERVICE_L3NM:
-            availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
-            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
-            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
+            availability   = generate_value(self._parameters.availability_ranges,   ndigits=5)
+            capacity_gbps  = generate_value(self._parameters.capacity_gbps_ranges,  ndigits=2)
+            e2e_latency_ms = generate_value(self._parameters.e2e_latency_ms_ranges, ndigits=2)
 
             constraints = [
                 json_constraint_sla_availability(1, True, availability),
@@ -314,13 +318,13 @@ class RequestGenerator:
             src_device_name = self._device_data[src_device_uuid]['name']
             src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
             src_router_id = ROUTER_ID.get(src_device_name)
-            src_router_num = int(src_device_name.replace('R', ''))
+            src_router_num = int(re.findall(r'^\D*(\d+)', src_device_name)[0])
             if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num)
             src_address_ip = '10.{:d}.{:d}.{:d}'.format(x, y, src_router_num)
 
             dst_device_name = self._device_data[dst_device_uuid]['name']
             dst_endpoint_name = self._device_endpoint_data[dst_device_uuid][dst_endpoint_uuid]['name']
-            dst_router_num = int(dst_device_name.replace('R', ''))
+            dst_router_num = int(re.findall(r'^\D*(\d+)', dst_device_name)[0])
             dst_router_id = ROUTER_ID.get(dst_device_name)
             if dst_router_id is None: dst_router_id = '10.0.0.{:d}'.format(dst_router_num)
             dst_address_ip = '10.{:d}.{:d}.{:d}'.format(y, x, dst_router_num)
@@ -382,7 +386,7 @@ class RequestGenerator:
             return json_service_l2nm_planned(
                 request_uuid, endpoint_ids=endpoint_ids, constraints=[], config_rules=config_rules)
 
-    def _compose_slice(self, num_request : int, request_uuid : str, request_type : str) -> Optional[Dict]:
+    def _compose_slice(self, request_uuid : str, request_type : str) -> Optional[Dict]:
         # choose source endpoint
         src = self._use_device_endpoint(request_uuid, request_type)
         if src is None:
@@ -403,6 +407,10 @@ class RequestGenerator:
             self._release_device_endpoint(src_device_uuid, src_endpoint_uuid)
             return None
 
+        with self._lock:
+            self._num_generated += 1
+            num_request = self._num_generated
+
         # compose endpoints
         dst_device_uuid,dst_endpoint_uuid = dst
         endpoint_ids = [
@@ -410,9 +418,10 @@ class RequestGenerator:
             json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
         ]
 
-        availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
-        capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
-        e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
+        availability   = generate_value(self._parameters.availability_ranges,   ndigits=5)
+        capacity_gbps  = generate_value(self._parameters.capacity_gbps_ranges,  ndigits=2)
+        e2e_latency_ms = generate_value(self._parameters.e2e_latency_ms_ranges, ndigits=2)
+
         constraints = [
             json_constraint_sla_availability(1, True, availability),
             json_constraint_sla_capacity(capacity_gbps),
@@ -425,10 +434,10 @@ class RequestGenerator:
             circuit_id = '{:03d}'.format(vlan_id)
 
             src_device_name = self._device_data[src_device_uuid]['name']
-            src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', '')))
+            src_router_id = '10.0.0.{:d}'.format(int(re.findall(r'^\D*(\d+)', src_device_name)[0]))
 
             dst_device_name = self._device_data[dst_device_uuid]['name']
-            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', '')))
+            dst_router_id = '10.0.0.{:d}'.format(int(re.findall(r'^\D*(\d+)', dst_device_name)[0]))
 
             config_rules = [
                 json_config_rule_set('/settings', {
@@ -460,13 +469,13 @@ class RequestGenerator:
 
             src_device_name = self._device_data[src_device_uuid]['name']
             src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
-            src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', '')))
-            src_address_ip = '.'.join([src_device_name.replace('R', ''), '0'] + src_endpoint_name.split('/'))
+            src_router_id = '10.0.0.{:d}'.format(int(re.findall(r'^\D*(\d+)', src_device_name)[0]))
+            src_address_ip = '.'.join([re.findall(r'^\D*(\d+)', src_device_name)[0], '0'] + src_endpoint_name.split('/'))
 
             dst_device_name = self._device_data[dst_device_uuid]['name']
             dst_endpoint_name = self._device_endpoint_data[dst_device_uuid][dst_endpoint_uuid]['name']
-            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', '')))
-            dst_address_ip = '.'.join([dst_device_name.replace('R', ''), '0'] + dst_endpoint_name.split('/'))
+            dst_router_id = '10.0.0.{:d}'.format(int(re.findall(r'^\D*(\d+)', dst_device_name)[0]))
+            dst_address_ip = '.'.join([re.findall(r'^\D*(\d+)', dst_device_name)[0], '0'] + dst_endpoint_name.split('/'))
 
             config_rules = [
                 json_config_rule_set('/settings', {
@@ -503,8 +512,15 @@ class RequestGenerator:
                 device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
                 endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
                 self._release_device_endpoint(device_uuid, endpoint_uuid)
+
+            with self._lock:
+                self._num_released += 1
+
         elif 'slice_id' in json_request:
             for endpoint_id in json_request['slice_endpoint_ids']:
                 device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
                 endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
                 self._release_device_endpoint(device_uuid, endpoint_uuid)
+
+            with self._lock:
+                self._num_released += 1
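Two behavioral notes on this file: `_num_generated` is now incremented only once both endpoints are secured, so blocked attempts no longer consume request numbers, and router numbers are extracted with `re.findall(r'^\D*(\d+)', name)` instead of `name.replace('R', '')`, which only worked for names shaped exactly like `R<digits>`. A quick sketch of the difference (device names are illustrative):

```python
import re

def router_num(device_name: str) -> int:
    # First run of digits after any non-digit prefix; raises IndexError if
    # the name contains no digits at all.
    return int(re.findall(r'^\D*(\d+)', device_name)[0])

assert router_num('R7') == 7
assert router_num('CORE-R23') == 23   # 'CORE-R23'.replace('R', '') -> 'COE-23'; int() fails
assert router_num('router12.dom1') == 12
```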
diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py
index 773a37eac258f8b3c16c966464ced124d3c77c85..340a5411bacee7b0aeb495963cdf65fbb5d14389 100644
--- a/src/load_generator/load_gen/RequestScheduler.py
+++ b/src/load_generator/load_gen/RequestScheduler.py
@@ -18,10 +18,11 @@ from apscheduler.jobstores.memory import MemoryJobStore
 from apscheduler.schedulers.blocking import BlockingScheduler
 from datetime import datetime, timedelta
 from typing import Dict, Optional
+from common.method_wrappers.Decorator import MetricsPool
 from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
-from .Constants import MAX_WORKER_THREADS
 from .DltTools import explore_entities_to_record, record_entities
 from .Parameters import Parameters
 from .RequestGenerator import RequestGenerator
@@ -31,6 +32,10 @@ logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
 
 LOGGER = logging.getLogger(__name__)
 
+METRICS_POOL = MetricsPool('LoadGen', 'Requests', labels={
+    'request_type': ''
+})
+
 class RequestScheduler:
     def __init__(
         self, parameters : Parameters, generator : RequestGenerator, scheduler_class=BlockingScheduler
@@ -38,7 +43,7 @@ class RequestScheduler:
         self._scheduler = scheduler_class()
         self._scheduler.configure(
             jobstores = {'default': MemoryJobStore()},
-            executors = {'default': ThreadPoolExecutor(max_workers=MAX_WORKER_THREADS)},
+            executors = {'default': ThreadPoolExecutor(max_workers=parameters.max_workers)},
             job_defaults = {
                 'coalesce': False,
                 'max_instances': 100,
@@ -52,6 +57,9 @@ class RequestScheduler:
     @property
     def num_generated(self): return min(self._generator.num_generated, self._parameters.num_requests)
 
+    @property
+    def num_released(self): return min(self._generator.num_released, self._parameters.num_requests)
+
     @property
     def infinite_loop(self): return self._generator.infinite_loop
 
@@ -64,11 +72,12 @@ class RequestScheduler:
         self._scheduler.add_job(
             self._request_setup, trigger='date', run_date=run_date, timezone=pytz.utc)
 
-    def _schedule_request_teardown(self, request : Dict) -> None:
+    def _schedule_request_teardown(self, request : Dict, request_type : str) -> None:
         ht  = random.expovariate(1.0 / self._parameters.holding_time)
         run_date = datetime.utcnow() + timedelta(seconds=ht)
+        args = (request, request_type)
         self._scheduler.add_job(
-            self._request_teardown, args=(request,), trigger='date', run_date=run_date, timezone=pytz.utc)
+            self._request_teardown, args=args, trigger='date', run_date=run_date, timezone=pytz.utc)
 
     def start(self):
         self._running.set()
@@ -80,7 +89,7 @@ class RequestScheduler:
         self._running.clear()
 
     def _request_setup(self) -> None:
-        completed,request = self._generator.compose_request()
+        completed, request, request_type = self._generator.compose_request()
         if completed:
             LOGGER.info('Generation Done!')
             #self._scheduler.shutdown()
@@ -91,6 +100,9 @@ class RequestScheduler:
 
         if request is None:
             LOGGER.warning('No resources available to compose new request')
+            metrics = METRICS_POOL.get_metrics_loadgen('setup', labels={'request_type': request_type})
+            _, _, _, _, counter_blocked = metrics
+            counter_blocked.inc()
             return
 
         if 'service_id' in request:
@@ -101,7 +113,7 @@ class RequestScheduler:
             dst_endpoint_uuid = request['service_endpoint_ids'][1]['endpoint_uuid']['uuid']
             LOGGER.info('Setup Service: uuid=%s src=%s:%s dst=%s:%s',
                 service_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
-            self._create_update(service=request)
+            self._create_update(request_type, service=request)
 
         elif 'slice_id' in request:
             slice_uuid = request['slice_id']['slice_uuid']['uuid']
@@ -111,12 +123,12 @@ class RequestScheduler:
             dst_endpoint_uuid = request['slice_endpoint_ids'][1]['endpoint_uuid']['uuid']
             LOGGER.info('Setup Slice: uuid=%s src=%s:%s dst=%s:%s',
                 slice_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
-            self._create_update(slice_=request)
+            self._create_update(request_type, slice_=request)
 
         if self._parameters.do_teardown:
-            self._schedule_request_teardown(request)
+            self._schedule_request_teardown(request, request_type)
 
-    def _request_teardown(self, request : Dict) -> None:
+    def _request_teardown(self, request : Dict, request_type : str) -> None:
         if 'service_id' in request:
             service_uuid = request['service_id']['service_uuid']['uuid']
             src_device_uuid = request['service_endpoint_ids'][0]['device_id']['device_uuid']['uuid']
@@ -125,7 +137,7 @@ class RequestScheduler:
             dst_endpoint_uuid = request['service_endpoint_ids'][1]['endpoint_uuid']['uuid']
             LOGGER.info('Teardown Service: uuid=%s src=%s:%s dst=%s:%s',
                 service_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
-            self._delete(service_id=ServiceId(**(request['service_id'])))
+            self._delete(request_type, service_id=ServiceId(**(request['service_id'])))
 
         elif 'slice_id' in request:
             slice_uuid = request['slice_id']['slice_uuid']['uuid']
@@ -135,33 +147,64 @@ class RequestScheduler:
             dst_endpoint_uuid = request['slice_endpoint_ids'][1]['endpoint_uuid']['uuid']
             LOGGER.info('Teardown Slice: uuid=%s src=%s:%s dst=%s:%s',
                 slice_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
-            self._delete(slice_id=SliceId(**(request['slice_id'])))
+            self._delete(request_type, slice_id=SliceId(**(request['slice_id'])))
 
         self._generator.release_request(request)
 
-    def _create_update(self, service : Optional[Dict] = None, slice_ : Optional[Dict] = None) -> None:
+    def _create_update(
+        self, request_type : str, service : Optional[Dict] = None, slice_ : Optional[Dict] = None
+    ) -> None:
         if self._parameters.dry_mode: return
 
+        metrics = METRICS_POOL.get_metrics_loadgen('setup', labels={'request_type': request_type})
+        histogram_duration, counter_started, counter_completed, counter_failed, _ = metrics
+
         service_id = None
         if service is not None:
+            service_client = ServiceClient()
+
             service_add = copy.deepcopy(service)
             service_add['service_endpoint_ids'] = []
             service_add['service_constraints'] = []
             service_add['service_config'] = {'config_rules': []}
+            service_add = Service(**service_add)
+            service = Service(**service)
+
+            with histogram_duration.time():
+                try:
+                    counter_started.inc()
+                    service_id = service_client.CreateService(service_add)
+                    service_id = service_client.UpdateService(service)
+                    counter_completed.inc()
+                except: # pylint: disable=bare-except
+                    counter_failed.inc()
+                    MSG = 'Exception Setting Up Service {:s}'
+                    LOGGER.exception(MSG.format(grpc_message_to_json_string(service)))
 
-            service_client = ServiceClient()
-            service_id = service_client.CreateService(Service(**service_add))
             service_client.close()
 
         slice_id = None
         if slice_ is not None:
+            slice_client = SliceClient()
+
             slice_add = copy.deepcopy(slice_)
             slice_add['slice_endpoint_ids'] = []
             slice_add['slice_constraints'] = []
             slice_add['slice_config'] = {'config_rules': []}
+            slice_add = Slice(**slice_add)
+            slice_ = Slice(**slice_)
+
+            with histogram_duration.time():
+                try:
+                    counter_started.inc()
+                    slice_id = slice_client.CreateSlice(slice_add)
+                    slice_id = slice_client.UpdateSlice(slice_)
+                    counter_completed.inc()
+                except: # pylint: disable=bare-except
+                    counter_failed.inc()
+                    MSG = 'Exception Setting Up Slice {:s}'
+                    LOGGER.exception(MSG.format(grpc_message_to_json_string(slice_)))
 
-            slice_client = SliceClient()
-            slice_id = slice_client.CreateSlice(Slice(**slice_add))
             slice_client.close()
 
         if self._parameters.record_to_dlt:
@@ -171,41 +214,47 @@ class RequestScheduler:
                 slices_to_record=slices_to_record, services_to_record=services_to_record,
                 devices_to_record=devices_to_record, delete=False)
 
-        service_id = None
-        if service is not None:
-            service_client = ServiceClient()
-            service_id = service_client.UpdateService(Service(**service))
-            service_client.close()
+    def _delete(
+        self, request_type : str, service_id : Optional[ServiceId] = None, slice_id : Optional[SliceId] = None
+    ) -> None:
+        if self._parameters.dry_mode: return
 
-        slice_id = None
-        if slice_ is not None:
-            slice_client = SliceClient()
-            slice_id = slice_client.UpdateSlice(Slice(**slice_))
-            slice_client.close()
+        metrics = METRICS_POOL.get_metrics_loadgen('teardown', labels={'request_type': request_type})
+        histogram_duration, counter_started, counter_completed, counter_failed, _ = metrics
 
         if self._parameters.record_to_dlt:
             entities_to_record = explore_entities_to_record(slice_id=slice_id, service_id=service_id)
             slices_to_record, services_to_record, devices_to_record = entities_to_record
-            record_entities(
-                slices_to_record=slices_to_record, services_to_record=services_to_record,
-                devices_to_record=devices_to_record, delete=False)
 
-    def _delete(self, service_id : Optional[ServiceId] = None, slice_id : Optional[SliceId] = None) -> None:
-        if self._parameters.dry_mode: return
+        if service_id is not None:
+            service_client = ServiceClient()
 
-        if self._parameters.record_to_dlt:
-            entities_to_record = explore_entities_to_record(slice_id=slice_id, service_id=service_id)
-            slices_to_record, services_to_record, devices_to_record = entities_to_record
+            with histogram_duration.time():
+                try:
+                    counter_started.inc()
+                    service_client.DeleteService(service_id)
+                    counter_completed.inc()
+                except: # pylint: disable=bare-except
+                    counter_failed.inc()
+                    MSG = 'Exception Tearing Down Service {:s}'
+                    LOGGER.exception(MSG.format(grpc_message_to_json_string(service_id)))
+
+            service_client.close()
 
         if slice_id is not None:
             slice_client = SliceClient()
-            slice_client.DeleteSlice(slice_id)
-            slice_client.close()
 
-        if service_id is not None:
-            service_client = ServiceClient()
-            service_client.DeleteService(service_id)
-            service_client.close()
+            with histogram_duration.time():
+                try:
+                    counter_started.inc()
+                    slice_client.DeleteSlice(slice_id)
+                    counter_completed.inc()
+                except: # pylint: disable=bare-except
+                    counter_failed.inc()
+                    MSG = 'Exception Tearing Down Slice {:s}'
+                    LOGGER.exception(MSG.format(grpc_message_to_json_string(slice_id)))
+
+            slice_client.close()
 
         if self._parameters.record_to_dlt:
             record_entities(
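The scheduler now wraps every setup and teardown call in a duration histogram plus started/completed/failed counters obtained from the project's `MetricsPool` wrapper. A hedged sketch of the same pattern with raw `prometheus_client` primitives (the metric names and the wrapper's internals are assumptions; unlike the code above, this sketch re-raises instead of only logging):

```python
from prometheus_client import Counter, Histogram

DURATION  = Histogram('loadgen_request_duration_seconds', 'Setup/teardown duration', ['step', 'request_type'])
STARTED   = Counter('loadgen_requests_started_total',   'Requests started',   ['step', 'request_type'])
COMPLETED = Counter('loadgen_requests_completed_total', 'Requests completed', ['step', 'request_type'])
FAILED    = Counter('loadgen_requests_failed_total',    'Requests failed',    ['step', 'request_type'])

def timed_call(step, request_type, func, *args):
    labels = {'step': step, 'request_type': request_type}
    with DURATION.labels(**labels).time():   # observes wall-clock duration
        try:
            STARTED.labels(**labels).inc()
            result = func(*args)
            COMPLETED.labels(**labels).inc()
            return result
        except Exception:
            FAILED.labels(**labels).inc()
            raise
```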
diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
index d66b0b2c10c5228e0c3d15759fc46b2c0770154d..9f12f34920fda69ba55963876e96f51a8256537c 100644
--- a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
+++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
@@ -21,6 +21,7 @@ from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceServicer
 from load_generator.load_gen.Parameters import Parameters as LoadGen_Parameters
 from load_generator.load_gen.RequestGenerator import RequestGenerator
 from load_generator.load_gen.RequestScheduler import RequestScheduler
+from load_generator.tools.ListScalarRange import list_scalar_range__grpc_to_list, list_scalar_range__list_to_grpc
 from .Constants import REQUEST_TYPE_MAP, REQUEST_TYPE_REVERSE_MAP
 
 LOGGER = logging.getLogger(__name__)
@@ -34,15 +35,19 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
 
     def Start(self, request : Parameters, context : grpc.ServicerContext) -> Empty:
         self._parameters = LoadGen_Parameters(
-            num_requests       = request.num_requests,
-            request_types      = [REQUEST_TYPE_MAP[rt] for rt in request.request_types],
-            offered_load       = request.offered_load if request.offered_load > 1.e-12 else None,
-            holding_time       = request.holding_time if request.holding_time > 1.e-12 else None,
-            inter_arrival_time = request.inter_arrival_time if request.inter_arrival_time > 1.e-12 else None,
-            do_teardown        = request.do_teardown,   # if set, schedule tear down of requests
-            dry_mode           = request.dry_mode,      # in dry mode, no request is sent to TeraFlowSDN
-            record_to_dlt      = request.record_to_dlt, # if set, upload changes to DLT
-            dlt_domain_id      = request.dlt_domain_id, # domain used to uploaded entities (when record_to_dlt = True)
+            num_requests          = request.num_requests,
+            request_types         = [REQUEST_TYPE_MAP[rt] for rt in request.request_types],
+            offered_load          = request.offered_load if request.offered_load > 1.e-12 else None,
+            holding_time          = request.holding_time if request.holding_time > 1.e-12 else None,
+            inter_arrival_time    = request.inter_arrival_time if request.inter_arrival_time > 1.e-12 else None,
+            availability_ranges   = list_scalar_range__grpc_to_list(request.availability  ),
+            capacity_gbps_ranges  = list_scalar_range__grpc_to_list(request.capacity_gbps ),
+            e2e_latency_ms_ranges = list_scalar_range__grpc_to_list(request.e2e_latency_ms),
+            max_workers           = request.max_workers,
+            do_teardown           = request.do_teardown,   # if set, schedule tear down of requests
+            dry_mode              = request.dry_mode,      # in dry mode, no request is sent to TeraFlowSDN
+            record_to_dlt         = request.record_to_dlt, # if set, upload changes to DLT
+            dlt_domain_id         = request.dlt_domain_id, # domain used to upload entities (when record_to_dlt = True)
         )
 
         LOGGER.info('Initializing Generator...')
@@ -68,17 +73,26 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
 
         status = Status()
         status.num_generated = self._scheduler.num_generated
+        status.num_released  = self._scheduler.num_released
         status.infinite_loop = self._scheduler.infinite_loop
         status.running       = self._scheduler.running
-        status.parameters.num_requests       = params.num_requests          # pylint: disable=no-member
-        status.parameters.offered_load       = params.offered_load          # pylint: disable=no-member
-        status.parameters.holding_time       = params.holding_time          # pylint: disable=no-member
-        status.parameters.inter_arrival_time = params.inter_arrival_time    # pylint: disable=no-member
-        status.parameters.do_teardown        = params.do_teardown           # pylint: disable=no-member
-        status.parameters.dry_mode           = params.dry_mode              # pylint: disable=no-member
-        status.parameters.record_to_dlt      = params.record_to_dlt         # pylint: disable=no-member
-        status.parameters.dlt_domain_id      = params.dlt_domain_id         # pylint: disable=no-member
-        status.parameters.request_types.extend(request_types)               # pylint: disable=no-member
+
+        stat_pars = status.parameters                               # pylint: disable=no-member
+        stat_pars.num_requests       = params.num_requests          # pylint: disable=no-member
+        stat_pars.offered_load       = params.offered_load          # pylint: disable=no-member
+        stat_pars.holding_time       = params.holding_time          # pylint: disable=no-member
+        stat_pars.inter_arrival_time = params.inter_arrival_time    # pylint: disable=no-member
+        stat_pars.max_workers        = params.max_workers           # pylint: disable=no-member
+        stat_pars.do_teardown        = params.do_teardown           # pylint: disable=no-member
+        stat_pars.dry_mode           = params.dry_mode              # pylint: disable=no-member
+        stat_pars.record_to_dlt      = params.record_to_dlt         # pylint: disable=no-member
+        stat_pars.dlt_domain_id      = params.dlt_domain_id         # pylint: disable=no-member
+        stat_pars.request_types.extend(request_types)               # pylint: disable=no-member
+
+        list_scalar_range__list_to_grpc(params.availability_ranges,   stat_pars.availability  ) # pylint: disable=no-member
+        list_scalar_range__list_to_grpc(params.capacity_gbps_ranges,  stat_pars.capacity_gbps ) # pylint: disable=no-member
+        list_scalar_range__list_to_grpc(params.e2e_latency_ms_ranges, stat_pars.e2e_latency_ms) # pylint: disable=no-member
+
         return status
 
     def Stop(self, request : Empty, context : grpc.ServicerContext) -> Empty:
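A client invoking the extended `Start()` RPC can fill the new repeated `ScalarOrRange` fields with the list helper; a hedged sketch, assuming the request message is exposed as `common.proto.load_generator_pb2.Parameters`:

```python
from common.proto.load_generator_pb2 import Parameters  # assumption: proto module path
from load_generator.tools.ListScalarRange import list_scalar_range__list_to_grpc

request = Parameters()
request.num_requests = 100
request.holding_time = 10.0
request.max_workers  = 20
# availability / capacity_gbps / e2e_latency_ms are repeated ScalarOrRange fields
list_scalar_range__list_to_grpc([(99.0, 99.999)],      request.availability)
list_scalar_range__list_to_grpc([10.0, (40.0, 100.0)], request.capacity_gbps)
list_scalar_range__list_to_grpc([(5.0, 20.0)],         request.e2e_latency_ms)
```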
diff --git a/src/load_generator/service/__main__.py b/src/load_generator/service/__main__.py
index 227099c59aa57f420c842a6210f3b8b146b23cda..7051a9a18bb2a86e2ca298b9ddfdc32f3e3fa6e7 100644
--- a/src/load_generator/service/__main__.py
+++ b/src/load_generator/service/__main__.py
@@ -13,14 +13,15 @@
 # limitations under the License.
 
 import logging, signal, sys, threading
+from prometheus_client import start_http_server
 from common.Constants import ServiceNameEnum
 from common.Settings import (
-    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level,
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
     wait_for_environment_variables)
 from .LoadGeneratorService import LoadGeneratorService
 
-log_level = get_log_level()
-logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+LOG_LEVEL = get_log_level()
+logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
 LOGGER = logging.getLogger(__name__)
 
 terminate = threading.Event()
@@ -39,10 +40,13 @@ def main():
         get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
+    LOGGER.info('Starting...')
     signal.signal(signal.SIGINT,  signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
-    LOGGER.info('Starting...')
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
 
     # Starting load generator service
     grpc_service = LoadGeneratorService()
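With `start_http_server(metrics_port)` in place, the load generator exposes its `MetricsPool` metrics on a plain HTTP endpoint that Prometheus can scrape. A quick smoke test (the port value is an assumption; use whatever `get_metrics_port()` resolves to in your deployment):

```python
import urllib.request

metrics_port = 9192  # assumption: replace with the configured metrics port
body = urllib.request.urlopen(f'http://127.0.0.1:{metrics_port}/metrics').read()
print(body.decode()[:300])  # Prometheus exposition format
```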
diff --git a/src/load_generator/tools/ListScalarRange.py b/src/load_generator/tools/ListScalarRange.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a5a5f39940049adee6dfbe35befd815db9256fe
--- /dev/null
+++ b/src/load_generator/tools/ListScalarRange.py
@@ -0,0 +1,99 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+from typing import List, Optional, Tuple, Union
+
+from common.proto.load_generator_pb2 import ScalarOrRange
+
+# RegEx to validate strings formatted as: '1, 2.3, 4.5  .. 6.7 , .8...9, 10., .11'
+# IMPORTANT: this regex just validates data, it does not extract the pieces of data!
+RE_FLOAT = r'[\ ]*[0-9]*[\.]?[0-9]*[\ ]*'
+RE_RANGE = RE_FLOAT + r'(\.\.' + RE_FLOAT + r')?'
+RE_SCALAR_RANGE_LIST  = RE_RANGE + r'(\,' + RE_RANGE + r')*'
+
+Type_ListScalarRange = List[Union[float, Tuple[float, float]]]
+
+def parse_list_scalar_range(value : str) -> Type_ListScalarRange:
+    str_value = str(value).replace(' ', '')
+    ranges = [[float(part) for part in item.split('..')] for item in str_value.split(',')]
+    return ranges
+
+def list_scalar_range__list_to_grpc(list_scalar_range : Type_ListScalarRange, obj : List[ScalarOrRange]) -> None:
+    for i,scalar_or_range in enumerate(list_scalar_range):
+        if isinstance(scalar_or_range, (float, str)):
+            _scalar = obj.add()
+            _scalar.scalar = float(scalar_or_range)
+        elif isinstance(scalar_or_range, (list, tuple)):
+            if len(scalar_or_range) == 1:
+                _scalar = obj.add()
+                _scalar.scalar = float(scalar_or_range[0])
+            elif len(scalar_or_range) == 2:
+                _range = obj.add()
+                _range.range.minimum = float(scalar_or_range[0])
+                _range.range.maximum = float(scalar_or_range[1])
+            else:
+                MSG = 'List/tuple with {:d} items in item(#{:d}, {:s})'
+                raise NotImplementedError(MSG.format(len(scalar_or_range), i, str(scalar_or_range)))
+        else:
+            MSG = 'Type({:s}) in item(#{:d}, {:s})'
+            raise NotImplementedError(MSG.format(str(type(scalar_or_range)), i, str(scalar_or_range)))
+
+def list_scalar_range__grpc_to_str(obj : List[ScalarOrRange]) -> str:
+    str_items = list()
+    for item in obj:
+        item_kind = item.WhichOneof('value')
+        if item_kind == 'scalar':
+            str_items.append(str(item.scalar))
+        elif item_kind == 'range':
+            str_items.append('{:s}..{:s}'.format(str(item.range.minimum), str(item.range.maximum)))
+        else:
+            raise NotImplementedError('Unsupported ScalarOrRange kind({:s})'.format(str(item_kind)))
+    return ','.join(str_items)
+
+def list_scalar_range__grpc_to_list(obj : List[ScalarOrRange]) -> Type_ListScalarRange:
+    list_scalar_range = list()
+    for item in obj:
+        item_kind = item.WhichOneof('value')
+        if item_kind == 'scalar':
+            scalar_or_range = float(item.scalar)
+        elif item_kind == 'range':
+            scalar_or_range = (float(item.range.minimum), float(item.range.maximum))
+        else:
+            raise NotImplementedError('Unsupported ScalarOrRange kind({:s})'.format(str(item_kind)))
+        list_scalar_range.append(scalar_or_range)
+    return list_scalar_range
+
+def generate_value(
+    list_scalar_range : Type_ListScalarRange, ndigits : Optional[int] = None
+) -> float:
+    scalar_or_range = random.choice(list_scalar_range)
+    if isinstance(scalar_or_range, (float, str)):
+        value = float(scalar_or_range)
+    elif isinstance(scalar_or_range, (list, tuple)):
+        if len(scalar_or_range) == 1:
+            value = float(scalar_or_range[0])
+        elif len(scalar_or_range) == 2:
+            minimum = float(scalar_or_range[0])
+            maximum = float(scalar_or_range[1])
+            value = random.uniform(minimum, maximum)
+        else:
+            MSG = 'List/tuple with {:d} items in item({:s})'
+            raise NotImplementedError(MSG.format(len(scalar_or_range), str(scalar_or_range)))
+    else:
+        MSG = 'Type({:s}) in item({:s})'
+        raise NotImplementedError(MSG.format(str(type(scalar_or_range)), str(scalar_or_range)))
+
+    if ndigits is None: return value
+    return round(value, ndigits=ndigits)
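End to end, these helpers parse the textual form (e.g. as entered through the WebUI), sample values during generation, and serialize back for display. A usage sketch grounded in the functions above:

```python
from load_generator.tools.ListScalarRange import generate_value, parse_list_scalar_range

ranges = parse_list_scalar_range('99.0, 99.9 .. 99.999')   # -> [[99.0], [99.9, 99.999]]
for _ in range(3):
    # either the scalar 99.0 or a uniform draw from [99.9, 99.999]
    print(generate_value(ranges, ndigits=5))
```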
diff --git a/src/load_generator/tools/__init__.py b/src/load_generator/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..38d04994fb0fa1951fb465bc127eb72659dc2eaf
--- /dev/null
+++ b/src/load_generator/tools/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/monitoring/service/MetricsDBTools.py b/src/monitoring/service/MetricsDBTools.py
index 6b98255411aa88ac18bd01474830b3bf268d3483..f928f07b94c71fb6f378161862e96d41af8bde7f 100644
--- a/src/monitoring/service/MetricsDBTools.py
+++ b/src/monitoring/service/MetricsDBTools.py
@@ -264,68 +264,65 @@ class MetricsDB():
                 for kpi in kpi_list:
                     alarm = False
                     kpi_value = kpi[2]
+                    kpiMinIsNone = ((kpiMinValue is None) or math.isnan(kpiMinValue))
+                    kpiMaxIsNone = ((kpiMaxValue is None) or math.isnan(kpiMaxValue))
                     if (kpiMinValue == kpi_value and kpiMaxValue == kpi_value and inRange):
                         alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and includeMaxValue):
                         if (kpi_value >= kpiMinValue and kpi_value <= kpiMaxValue):
                             alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and not includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and not includeMaxValue):
                         if (kpi_value >= kpiMinValue and kpi_value < kpiMaxValue):
                             alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and includeMaxValue):
                         if (kpi_value > kpiMinValue and kpi_value <= kpiMaxValue):
                             alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and not includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and not includeMaxValue):
                         if (kpi_value > kpiMinValue and kpi_value < kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and includeMaxValue):
                         if (kpi_value <= kpiMinValue or kpi_value >= kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and not includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and not includeMaxValue):
                         if (kpi_value <= kpiMinValue or kpi_value > kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and includeMaxValue):
                         if (kpi_value < kpiMinValue or kpi_value >= kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and not includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and not includeMaxValue):
                         if (kpi_value < kpiMinValue or kpi_value > kpiMaxValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is not None and kpiMaxValue is None and includeMinValue):
+                    elif (inRange and not kpiMinIsNone and kpiMaxIsNone and includeMinValue):
                         if (kpi_value >= kpiMinValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
+                    elif (inRange and not kpiMinIsNone and kpiMaxIsNone and not includeMinValue):
                         if (kpi_value > kpiMinValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
+                    elif (not inRange and not kpiMinIsNone and kpiMaxIsNone and includeMinValue):
                         if (kpi_value <= kpiMinValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
-                        if (kpi_value <= kpiMinValue):
+                    elif (not inRange and not kpiMinIsNone and kpiMaxIsNone and not includeMinValue):
+                        if (kpi_value < kpiMinValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is None and kpiMaxValue is not None and includeMaxValue):
+                    elif (inRange and kpiMinIsNone and not kpiMaxIsNone and includeMaxValue):
                         if (kpi_value <= kpiMaxValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
+                    elif (inRange and kpiMinIsNone and not kpiMaxIsNone and not includeMaxValue):
                         if (kpi_value < kpiMaxValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
+                    elif (not inRange and kpiMinIsNone and not kpiMaxIsNone and includeMaxValue):
                         if (kpi_value >= kpiMaxValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
-                        if (kpi_value >= kpiMaxValue):
+                    elif (not inRange and kpiMinIsNone and not kpiMaxIsNone and not includeMaxValue):
+                        if (kpi_value > kpiMaxValue):
                             alarm = True
                     if alarm:
                         valid_kpi_list.append(kpi)
-                alarm_queue.put_nowait(valid_kpi_list)
-                LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> kpi_value:{kpi[2]}, timestamp:{kpi[1]}")
+                if valid_kpi_list:
+                    alarm_queue.put_nowait(valid_kpi_list)
+                    LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> kpi_value:{valid_kpi_list[-1][2]}, timestamp:{valid_kpi_list[-1][1]}")
+                else:
+                    LOGGER.debug(f"No new alarms triggered for the alarm of KPI {kpi_id}")
             else:
                 LOGGER.debug(f"No new data for the alarm of KPI {kpi_id}")
         except (Exception) as e:
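The rewritten branch table enumerates every combination of bound presence and inclusivity, with NaN now treated as "unset". For readability, the same table can be condensed into a single predicate; a hedged sketch (not a drop-in: the exact-match fast path at the top of the loop is covered only when the include flags are set):

```python
import math

def threshold_alarm(value, min_v, max_v, include_min, include_max, in_range):
    # 'include_*' pulls the boundary into the alarm region on its side.
    has_min = (min_v is not None) and not math.isnan(min_v)
    has_max = (max_v is not None) and not math.isnan(max_v)
    if in_range:   # alarm when value lies inside [min, max]
        ok_min = (value >= min_v if include_min else value > min_v) if has_min else True
        ok_max = (value <= max_v if include_max else value < max_v) if has_max else True
        return ok_min and ok_max
    # alarm when value lies outside [min, max]
    breach_min = (value <= min_v if include_min else value < min_v) if has_min else False
    breach_max = (value >= max_v if include_max else value > max_v) if has_max else False
    return breach_min or breach_max
```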
diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py
index f408734df40c1bc5c16b7e108e3ce5a211165f71..3bfef65ff0c52f110b9a091e96b6f6b97dfa79cf 100644
--- a/src/monitoring/service/MonitoringServiceServicerImpl.py
+++ b/src/monitoring/service/MonitoringServiceServicerImpl.py
@@ -12,12 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os, grpc
+import logging, os, grpc
 from queue import Queue
-
 from typing import Iterator
-
-from common.logger import getJSONLogger
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 from common.proto.context_pb2 import Empty
 from common.proto.device_pb2 import MonitoringSettings
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
@@ -25,30 +23,22 @@ from common.proto.monitoring_pb2_grpc import MonitoringServiceServicer
 from common.proto.monitoring_pb2 import AlarmResponse, AlarmDescriptor, AlarmList, SubsList, KpiId, \
     KpiDescriptor, KpiList, KpiQuery, SubsDescriptor, SubscriptionID, AlarmID, KpiDescriptorList, \
     MonitorKpiRequest, Kpi, AlarmSubscription, SubsResponse, RawKpiTable, RawKpi, RawKpiList
-from common.method_wrappers.ServiceExceptions import ServiceException
 from common.tools.timestamp.Converters import timestamp_string_to_float, timestamp_utcnow_to_float
-
-from monitoring.service import ManagementDBTools, MetricsDBTools
 from device.client.DeviceClient import DeviceClient
-
-from prometheus_client import Counter, Summary
-
+from monitoring.service import ManagementDBTools, MetricsDBTools
 from monitoring.service.AlarmManager import AlarmManager
 from monitoring.service.NameMapping import NameMapping
 from monitoring.service.SubscriptionManager import SubscriptionManager
 
-LOGGER = getJSONLogger('monitoringservice-server')
-LOGGER.setLevel('DEBUG')
-
-MONITORING_GETINSTANTKPI_REQUEST_TIME = Summary(
-    'monitoring_getinstantkpi_processing_seconds', 'Time spent processing monitoring instant kpi request')
-MONITORING_INCLUDEKPI_COUNTER = Counter('monitoring_includekpi_counter', 'Monitoring include kpi request counter')
+LOGGER = logging.getLogger(__name__)
 
 METRICSDB_HOSTNAME = os.environ.get("METRICSDB_HOSTNAME")
 METRICSDB_ILP_PORT = os.environ.get("METRICSDB_ILP_PORT")
 METRICSDB_REST_PORT = os.environ.get("METRICSDB_REST_PORT")
 METRICSDB_TABLE_MONITORING_KPIS = os.environ.get("METRICSDB_TABLE_MONITORING_KPIS")
 
+METRICS_POOL = MetricsPool('Monitoring', 'RPC')
+
 class MonitoringServiceServicerImpl(MonitoringServiceServicer):
     def __init__(self, name_mapping : NameMapping):
         LOGGER.info('Init monitoringService')
@@ -63,514 +53,363 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         LOGGER.info('MetricsDB initialized')
 
     # SetKpi (SetKpiRequest) returns (KpiId) {}
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetKpi(
             self, request: KpiDescriptor, grpc_context: grpc.ServicerContext
     ) -> KpiId:
-        # CREATEKPI_COUNTER_STARTED.inc()
-        LOGGER.info('SetKpi')
-        try:
-            # Here the code to create a sqlite query to crete a KPI and return a KpiID
-            response = KpiId()
-
-            kpi_description = request.kpi_description
-            kpi_sample_type = request.kpi_sample_type
-            kpi_device_id = request.device_id.device_uuid.uuid
-            kpi_endpoint_id = request.endpoint_id.endpoint_uuid.uuid
-            kpi_service_id = request.service_id.service_uuid.uuid
-            kpi_slice_id = request.slice_id.slice_uuid.uuid
-            kpi_connection_id = request.connection_id.connection_uuid.uuid
-
-
-            if request.kpi_id.kpi_id.uuid != "":
-                response.kpi_id.uuid = request.kpi_id.kpi_id.uuid
-            #     Here the code to modify an existing kpi
-            else:
-                data = self.management_db.insert_KPI(
-                    kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id, kpi_slice_id, kpi_connection_id)
-                response.kpi_id.uuid = str(data)
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('SetKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('SetKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
+        response = KpiId()
+        kpi_description = request.kpi_description
+        kpi_sample_type = request.kpi_sample_type
+        kpi_device_id = request.device_id.device_uuid.uuid
+        kpi_endpoint_id = request.endpoint_id.endpoint_uuid.uuid
+        kpi_service_id = request.service_id.service_uuid.uuid
+        kpi_slice_id = request.slice_id.slice_uuid.uuid
+        kpi_connection_id = request.connection_id.connection_uuid.uuid
+        if request.kpi_id.kpi_id.uuid != "":
+            response.kpi_id.uuid = request.kpi_id.kpi_id.uuid
+            # Here the code to modify an existing kpi
+        else:
+            data = self.management_db.insert_KPI(
+                kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id, kpi_slice_id,
+                kpi_connection_id)
+            response.kpi_id.uuid = str(data)
+        return response
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def DeleteKpi(self, request: KpiId, grpc_context: grpc.ServicerContext) -> Empty:
+        kpi_id = int(request.kpi_id.uuid)
+        kpi = self.management_db.get_KPI(kpi_id)
+        if kpi:
+            self.management_db.delete_KPI(kpi_id)
+        else:
+            LOGGER.info('DeleteKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+        return Empty()
 
-        LOGGER.info('DeleteKpi')
-        try:
-            LOGGER.debug(f'DeleteKpi with KpiID: {request.kpi_id.uuid}')
-            kpi_id = int(request.kpi_id.uuid)
-            kpi = self.management_db.get_KPI(kpi_id)
-            if kpi:
-                self.management_db.delete_KPI(kpi_id)
-            else:
-                LOGGER.info('DeleteKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
-            return Empty()
-        except ServiceException as e:
-            LOGGER.exception('DeleteKpi exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('DeleteKpi exception')
-
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetKpiDescriptor(self, request: KpiId, grpc_context: grpc.ServicerContext) -> KpiDescriptor:
-        LOGGER.info('getting Kpi by KpiID')
-        try:
-            kpi_id = request.kpi_id.uuid
-            kpi_db = self.management_db.get_KPI(int(kpi_id))
-            kpiDescriptor = KpiDescriptor()
-            if kpi_db is None:
-                LOGGER.info('GetKpiDescriptor error: KpiID({:s}): not found in database'.format(str(kpi_id)))
-            else:
-                kpiDescriptor.kpi_description                       = kpi_db[1]
-                kpiDescriptor.kpi_sample_type                       = kpi_db[2]
-                kpiDescriptor.device_id.device_uuid.uuid            = str(kpi_db[3])
-                kpiDescriptor.endpoint_id.endpoint_uuid.uuid        = str(kpi_db[4])
-                kpiDescriptor.service_id.service_uuid.uuid          = str(kpi_db[5])
-                kpiDescriptor.slice_id.slice_uuid.uuid              = str(kpi_db[6])
-                kpiDescriptor.connection_id.connection_uuid.uuid    = str(kpi_db[7])
-            return kpiDescriptor
-        except ServiceException as e:
-            LOGGER.exception('GetKpiDescriptor exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception:  # pragma: no cover
-            LOGGER.exception('GetKpiDescriptor exception')
-
+        kpi_id = request.kpi_id.uuid
+        kpi_db = self.management_db.get_KPI(int(kpi_id))
+        kpiDescriptor = KpiDescriptor()
+        if kpi_db is None:
+            LOGGER.info('GetKpiDescriptor error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+        else:
+            kpiDescriptor.kpi_description                       = kpi_db[1]
+            kpiDescriptor.kpi_sample_type                       = kpi_db[2]
+            kpiDescriptor.device_id.device_uuid.uuid            = str(kpi_db[3])
+            kpiDescriptor.endpoint_id.endpoint_uuid.uuid        = str(kpi_db[4])
+            kpiDescriptor.service_id.service_uuid.uuid          = str(kpi_db[5])
+            kpiDescriptor.slice_id.slice_uuid.uuid              = str(kpi_db[6])
+            kpiDescriptor.connection_id.connection_uuid.uuid    = str(kpi_db[7])
+        return kpiDescriptor
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetKpiDescriptorList(self, request: Empty, grpc_context: grpc.ServicerContext) -> KpiDescriptorList:
-
-        LOGGER.info('GetKpiDescriptorList')
-        try:
-            kpi_descriptor_list = KpiDescriptorList()
-
-            data = self.management_db.get_KPIS()
-            LOGGER.debug(f"data: {data}")
-
-            for item in data:
-                kpi_descriptor = KpiDescriptor()
-                kpi_descriptor.kpi_id.kpi_id.uuid                   = str(item[0])
-                kpi_descriptor.kpi_description                      = item[1]
-                kpi_descriptor.kpi_sample_type                      = item[2]
-                kpi_descriptor.device_id.device_uuid.uuid           = str(item[3])
-                kpi_descriptor.endpoint_id.endpoint_uuid.uuid       = str(item[4])
-                kpi_descriptor.service_id.service_uuid.uuid         = str(item[5])
-                kpi_descriptor.slice_id.slice_uuid.uuid             = str(item[6])
-                kpi_descriptor.connection_id.connection_uuid.uuid   = str(item[7])
-
-                kpi_descriptor_list.kpi_descriptor_list.append(kpi_descriptor)
-
-            return kpi_descriptor_list
-        except ServiceException as e:
-            LOGGER.exception('GetKpiDescriptorList exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetKpiDescriptorList exception')
-
+        kpi_descriptor_list = KpiDescriptorList()
+        data = self.management_db.get_KPIS()
+        LOGGER.debug(f"data: {data}")
+        for item in data:
+            kpi_descriptor = KpiDescriptor()
+            kpi_descriptor.kpi_id.kpi_id.uuid                   = str(item[0])
+            kpi_descriptor.kpi_description                      = item[1]
+            kpi_descriptor.kpi_sample_type                      = item[2]
+            kpi_descriptor.device_id.device_uuid.uuid           = str(item[3])
+            kpi_descriptor.endpoint_id.endpoint_uuid.uuid       = str(item[4])
+            kpi_descriptor.service_id.service_uuid.uuid         = str(item[5])
+            kpi_descriptor.slice_id.slice_uuid.uuid             = str(item[6])
+            kpi_descriptor.connection_id.connection_uuid.uuid   = str(item[7])
+            kpi_descriptor_list.kpi_descriptor_list.append(kpi_descriptor)
+        return kpi_descriptor_list
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def IncludeKpi(self, request: Kpi, grpc_context: grpc.ServicerContext) -> Empty:
+        kpi_id = request.kpi_id.kpi_id.uuid
+        kpiDescriptor = self.GetKpiDescriptor(request.kpi_id, grpc_context)
 
-        LOGGER.info('IncludeKpi')
-
-        try:
-            kpi_id = request.kpi_id.kpi_id.uuid
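+        # NOTE: GetKpiDescriptor (above) returns an empty KpiDescriptor rather than None when the
+        # KPI is missing from the database, so this guard is kept for safety only.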
+        if kpiDescriptor is None:
+            LOGGER.info('IncludeKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+        else:
+            kpiSampleType = KpiSampleType.Name(kpiDescriptor.kpi_sample_type).upper().replace('KPISAMPLETYPE_', '')
+            kpiId = kpi_id
+            deviceId = kpiDescriptor.device_id.device_uuid.uuid
+            endpointId = kpiDescriptor.endpoint_id.endpoint_uuid.uuid
+            serviceId = kpiDescriptor.service_id.service_uuid.uuid
+            sliceId   = kpiDescriptor.slice_id.slice_uuid.uuid
+            connectionId = kpiDescriptor.connection_id.connection_uuid.uuid
+            time_stamp = request.timestamp.timestamp
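+            # Extract whichever oneof member of kpi_value is set (e.g. floatVal), without hardcoding the type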
+            kpi_value = getattr(request.kpi_value, request.kpi_value.WhichOneof('value'))
+
+            # Build the structure to be included as point in the MetricsDB
+            self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, sliceId, connectionId, kpi_value)
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def MonitorKpi(self, request: MonitorKpiRequest, grpc_context: grpc.ServicerContext) -> Empty:
+        kpi_id = int(request.kpi_id.kpi_id.uuid)
+        kpi = self.management_db.get_KPI(kpi_id)
+        response = Empty()
+        if kpi:
+            # Build the MonitoringSettings request to send to the Device service
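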
+            monitor_device_request = MonitoringSettings()
             kpiDescriptor = self.GetKpiDescriptor(request.kpi_id, grpc_context)
+            monitor_device_request.kpi_descriptor.CopyFrom(kpiDescriptor)
+            monitor_device_request.kpi_id.kpi_id.uuid = request.kpi_id.kpi_id.uuid
+            monitor_device_request.sampling_duration_s = request.monitoring_window_s
+            monitor_device_request.sampling_interval_s = request.sampling_rate_s
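+            # The monitoring flag avoids starting a second collector for a KPI that is already being monitored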
+            if not self.management_db.check_monitoring_flag(kpi_id):
+                device_client = DeviceClient()
+                device_client.MonitorDeviceKpi(monitor_device_request)
+                self.management_db.set_monitoring_flag(kpi_id, True)
+            else:
+                LOGGER.warning('MonitorKpi warning: KpiID({:s}) is currently being monitored'.format(str(kpi_id)))
+        else:
+            LOGGER.info('MonitorKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+        return response
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def QueryKpiData(self, request: KpiQuery, grpc_context: grpc.ServicerContext) -> RawKpiTable:
+        raw_kpi_table = RawKpiTable()
+        kpi_id_list             = request.kpi_ids
+        monitoring_window_s     = request.monitoring_window_s
+        last_n_samples          = request.last_n_samples
+        start_timestamp         = request.start_timestamp.timestamp
+        end_timestamp           = request.end_timestamp.timestamp
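+        # The query bounds (time window and/or last_n_samples) are resolved inside metrics_db.get_raw_kpi_list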
+
+        # Check if all the Kpi_ids exist
+        for item in kpi_id_list:
+            kpi_id = item.kpi_id.uuid
+            kpiDescriptor = self.GetKpiDescriptor(item, grpc_context)
             if kpiDescriptor is None:
-                LOGGER.info('IncludeKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+                LOGGER.info('QueryKpiData error: KpiID({:s}): not found in database'.format(str(kpi_id)))
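+                # NOTE: the first unknown KPI id aborts the whole query; remaining ids are skipped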
+                break
             else:
-                kpiSampleType = KpiSampleType.Name(kpiDescriptor.kpi_sample_type).upper().replace('KPISAMPLETYPE_', '')
-                kpiId = kpi_id
-                deviceId = kpiDescriptor.device_id.device_uuid.uuid
-                endpointId = kpiDescriptor.endpoint_id.endpoint_uuid.uuid
-                serviceId = kpiDescriptor.service_id.service_uuid.uuid
-                sliceId   = kpiDescriptor.slice_id.slice_uuid.uuid
-                connectionId = kpiDescriptor.connection_id.connection_uuid.uuid
-                time_stamp = request.timestamp.timestamp
-                kpi_value = getattr(request.kpi_value, request.kpi_value.WhichOneof('value'))
-
-                # Build the structure to be included as point in the MetricsDB
-                self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, sliceId, connectionId, kpi_value)
-
-            return Empty()
-        except ServiceException as e:
-            LOGGER.exception('IncludeKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(e.code, e.details)
-        except Exception:  # pragma: no cover
-            LOGGER.exception('IncludeKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-
-    def MonitorKpi(self, request: MonitorKpiRequest, grpc_context: grpc.ServicerContext) -> Empty:
-
-        LOGGER.info('MonitorKpi')
-        try:
-            kpi_id = int(request.kpi_id.kpi_id.uuid)
-            kpi = self.management_db.get_KPI(kpi_id)
-            response = Empty()
-
-            if kpi:
-                # Sets the request to send to the device service
-                monitor_device_request = MonitoringSettings()
-
-                kpiDescriptor = self.GetKpiDescriptor(request.kpi_id, grpc_context)
+                # Execute query per Kpi_id and introduce their kpi_list in the table
+                kpi_list = self.metrics_db.get_raw_kpi_list(kpi_id, monitoring_window_s, last_n_samples, start_timestamp, end_timestamp)
+                raw_kpi_list = RawKpiList()
+                raw_kpi_list.kpi_id.kpi_id.uuid = kpi_id
 
-                monitor_device_request.kpi_descriptor.CopyFrom(kpiDescriptor)
-                monitor_device_request.kpi_id.kpi_id.uuid = request.kpi_id.kpi_id.uuid
-                monitor_device_request.sampling_duration_s = request.monitoring_window_s
-                monitor_device_request.sampling_interval_s = request.sampling_rate_s
+                LOGGER.debug(str(kpi_list))
 
-                if not self.management_db.check_monitoring_flag(kpi_id):
-                    device_client = DeviceClient()
-                    device_client.MonitorDeviceKpi(monitor_device_request)
-                    self.management_db.set_monitoring_flag(kpi_id,True)
-                    self.management_db.check_monitoring_flag(kpi_id)
+                if kpi_list is None:
+                    LOGGER.info('QueryKpiData error: KpiID({:s}): points not found in metrics database'.format(str(kpi_id)))
                 else:
-                    LOGGER.warning('MonitorKpi warning: KpiID({:s}) is currently being monitored'.format(str(kpi_id)))
-            else:
-                LOGGER.info('MonitorKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
-            return response
-        except ServiceException as e:
-            LOGGER.exception('MonitorKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('MonitorKpi exception')
-            grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
-            # CREATEKPI_COUNTER_FAILED.inc()
+                    for item in kpi_list:
+                        raw_kpi = RawKpi()
+                        raw_kpi.timestamp.timestamp = timestamp_string_to_float(item[0])
+                        raw_kpi.kpi_value.floatVal  = item[1]
+                        raw_kpi_list.raw_kpis.append(raw_kpi)
 
-    def QueryKpiData(self, request: KpiQuery, grpc_context: grpc.ServicerContext) -> RawKpiTable:
+                raw_kpi_table.raw_kpi_lists.append(raw_kpi_list)
+        return raw_kpi_table
 
-        LOGGER.info('QueryKpiData')
-        try:
-            raw_kpi_table = RawKpiTable()
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetKpiSubscription(self, request: SubsDescriptor, grpc_context: grpc.ServicerContext) -> Iterator[SubsResponse]:
+        subs_queue = Queue()
+
+        kpi_id = request.kpi_id.kpi_id.uuid
+        sampling_duration_s = request.sampling_duration_s
+        sampling_interval_s = request.sampling_interval_s
+        start_timestamp = request.start_timestamp.timestamp
+        end_timestamp = request.end_timestamp.timestamp
+
+        subscriber = "localhost"  # Investigate how to get info from the requester
+
+        subs_id = self.management_db.insert_subscription(kpi_id, subscriber, sampling_duration_s,
+                                                            sampling_interval_s, start_timestamp, end_timestamp)
+        self.subs_manager.create_subscription(subs_queue, subs_id, kpi_id, sampling_interval_s, sampling_duration_s,
+                                                start_timestamp, end_timestamp)
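+        # create_subscription is expected to push batches of KPI rows into subs_queue asynchronously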
+
+        # Drain the queue and stream each batch as a SubsResponse until the subscription window ends
+        while True:
+            while not subs_queue.empty():
+                subs_response = SubsResponse()
+                samples = subs_queue.get_nowait()
+                for item in samples:
+                    kpi = Kpi()
+                    kpi.kpi_id.kpi_id.uuid = str(item[0])
+                    kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
+                    kpi.kpi_value.floatVal = item[2]  # This must be improved
+                    subs_response.kpi_list.kpi.append(kpi)
+                subs_response.subs_id.subs_id.uuid = str(subs_id)
+                yield subs_response
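+            # NOTE: this outer loop busy-polls the queue; a short sleep between iterations would reduce CPU usage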
+            if timestamp_utcnow_to_float() > end_timestamp:
+                break
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetSubsDescriptor(self, request: SubscriptionID, grpc_context: grpc.ServicerContext) -> SubsDescriptor:
+        subs_id = request.subs_id.uuid
+        subs_db = self.management_db.get_subscription(int(request.subs_id.uuid))
+        response = SubsDescriptor()
+        if subs_db is None:
+            LOGGER.info('GetSubsDescriptor error: SubsID({:s}): not found in database'.format(str(subs_id)))
+        else:
+            LOGGER.debug(subs_db)
+            response.subs_id.subs_id.uuid = str(subs_db[0])
+            response.kpi_id.kpi_id.uuid = str(subs_db[1])
+            response.sampling_duration_s = subs_db[3]
+            response.sampling_interval_s = subs_db[4]
+            response.start_timestamp.timestamp = subs_db[5]
+            response.end_timestamp.timestamp = subs_db[6]
+        return response
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetSubscriptions(self, request: Empty, grpc_context: grpc.ServicerContext) -> SubsList:
+        response = SubsList()
+        data = self.management_db.get_subscriptions()
+        for subs_db in data:
+            subs_descriptor = SubsDescriptor()
+            subs_descriptor.subs_id.subs_id.uuid = str(subs_db[0])
+            subs_descriptor.kpi_id.kpi_id.uuid = str(subs_db[1])
+            subs_descriptor.sampling_duration_s = subs_db[3]
+            subs_descriptor.sampling_interval_s = subs_db[4]
+            subs_descriptor.start_timestamp.timestamp = subs_db[5]
+            subs_descriptor.end_timestamp.timestamp = subs_db[6]
+            response.subs_descriptor.append(subs_descriptor)
+        return response
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def DeleteSubscription(self, request: SubscriptionID, grpc_context: grpc.ServicerContext) -> Empty:
+        subs_id = int(request.subs_id.uuid)
+        subs_db = self.management_db.get_subscription(int(request.subs_id.uuid))
+        if subs_db:
+            self.management_db.delete_subscription(subs_id)
+        else:
+            LOGGER.info('DeleteSubscription error: SubsID({:s}): not found in database'.format(str(subs_id)))
+        return Empty()
 
-            LOGGER.debug(str(request))
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetKpiAlarm(self, request: AlarmDescriptor, grpc_context: grpc.ServicerContext) -> AlarmResponse:
+        response = AlarmID()
+        alarm_description = request.alarm_description
+        alarm_name = request.name
+        kpi_id = request.kpi_id.kpi_id.uuid
+        kpi_min_value = float(request.kpi_value_range.kpiMinValue.floatVal)
+        kpi_max_value = float(request.kpi_value_range.kpiMaxValue.floatVal)
+        in_range = request.kpi_value_range.inRange
+        include_min_value = request.kpi_value_range.includeMinValue
+        include_max_value = request.kpi_value_range.includeMaxValue
+        timestamp = request.timestamp.timestamp
+        LOGGER.debug(f"request.AlarmID: {request.alarm_id.alarm_id.uuid}")
+        if request.alarm_id.alarm_id.uuid != "":
+            alarm_id = request.alarm_id.alarm_id.uuid
+            # TODO: add the logic to modify an existing alarm here
+        else:
+            alarm_id = self.management_db.insert_alarm(alarm_description, alarm_name, kpi_id, kpi_min_value,
+                                                        kpi_max_value,
+                                                        in_range, include_min_value, include_max_value)
+            LOGGER.debug(f"AlarmID: {alarm_id}")
+        response.alarm_id.uuid = str(alarm_id)
+        return response
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetAlarms(self, request: Empty, grpc_context: grpc.ServicerContext) -> AlarmList:
+        response = AlarmList()
+        data = self.management_db.get_alarms()
 
-            kpi_id_list             = request.kpi_ids
-            monitoring_window_s     = request.monitoring_window_s
-            last_n_samples          = request.last_n_samples
-            start_timestamp         = request.start_timestamp.timestamp
-            end_timestamp           = request.end_timestamp.timestamp
+        for alarm in data:
+            alarm_descriptor = AlarmDescriptor()
 
-            # Check if all the Kpi_ids exist
-            for item in kpi_id_list:
-                kpi_id = item.kpi_id.uuid
+            alarm_descriptor.alarm_id.alarm_id.uuid = str(alarm[0])
+            alarm_descriptor.alarm_description = alarm[1]
+            alarm_descriptor.name = alarm[2]
+            alarm_descriptor.kpi_id.kpi_id.uuid = str(alarm[3])
+            alarm_descriptor.kpi_value_range.kpiMinValue.floatVal = alarm[4]
+            alarm_descriptor.kpi_value_range.kpiMaxValue.floatVal = alarm[5]
+            alarm_descriptor.kpi_value_range.inRange = bool(alarm[6])
+            alarm_descriptor.kpi_value_range.includeMinValue = bool(alarm[7])
+            alarm_descriptor.kpi_value_range.includeMaxValue = bool(alarm[8])
 
-                kpiDescriptor = self.GetKpiDescriptor(item, grpc_context)
-                if kpiDescriptor is None:
-                    LOGGER.info('QueryKpiData error: KpiID({:s}): not found in database'.format(str(kpi_id)))
-                    break
-                else:
-                    # Execute query per Kpi_id and introduce their kpi_list in the table
-                    kpi_list = self.metrics_db.get_raw_kpi_list(kpi_id,monitoring_window_s,last_n_samples,start_timestamp,end_timestamp)
-                    raw_kpi_list = RawKpiList()
-                    raw_kpi_list.kpi_id.kpi_id.uuid = kpi_id
-
-                    LOGGER.debug(str(kpi_list))
-
-                    if kpi_list is None:
-                        LOGGER.info('QueryKpiData error: KpiID({:s}): points not found in metrics database'.format(str(kpi_id)))
-                    else:
-                        for item in kpi_list:
-                            raw_kpi = RawKpi()
-                            raw_kpi.timestamp.timestamp = timestamp_string_to_float(item[0])
-                            raw_kpi.kpi_value.floatVal  = item[1]
-                            raw_kpi_list.raw_kpis.append(raw_kpi)
-
-                    raw_kpi_table.raw_kpi_lists.append(raw_kpi_list)
-
-            return raw_kpi_table
-        except ServiceException as e:
-            LOGGER.exception('QueryKpiData exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('QueryKpiData exception')
+            response.alarm_descriptor.append(alarm_descriptor)
 
-    def SetKpiSubscription(self, request: SubsDescriptor, grpc_context: grpc.ServicerContext) -> SubsResponse:
+        return response
 
-        LOGGER.info('SubscribeKpi')
-        try:
-            subs_queue = Queue()
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetAlarmDescriptor(self, request: AlarmID, grpc_context: grpc.ServicerContext) -> AlarmDescriptor:
+        alarm_id = request.alarm_id.uuid
+        LOGGER.debug(alarm_id)
+        alarm = self.management_db.get_alarm(alarm_id)
+        response = AlarmDescriptor()
+
+        if alarm:
+            LOGGER.debug(f"{alarm}")
+            response.alarm_id.alarm_id.uuid = str(alarm_id)
+            response.alarm_description = alarm[1]
+            response.name = alarm[2]
+            response.kpi_id.kpi_id.uuid = str(alarm[3])
+            response.kpi_value_range.kpiMinValue.floatVal = alarm[4]
+            response.kpi_value_range.kpiMaxValue.floatVal = alarm[5]
+            response.kpi_value_range.inRange = bool(alarm[6])
+            response.kpi_value_range.includeMinValue = bool(alarm[7])
+            response.kpi_value_range.includeMaxValue = bool(alarm[8])
+        else:
+            LOGGER.info('GetAlarmDescriptor error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
+            response.alarm_id.alarm_id.uuid = "NoID"
+        return response
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetAlarmResponseStream(
+        self, request: AlarmSubscription, grpc_context: grpc.ServicerContext
+    ) -> Iterator[AlarmResponse]:
+        alarm_id = request.alarm_id.alarm_id.uuid
+        alarm_data = self.management_db.get_alarm(alarm_id)
+        real_start_time = timestamp_utcnow_to_float()
+
+        if alarm_data:
+            LOGGER.debug(f"{alarm_data}")
+            alarm_queue = Queue()
 
-            kpi_id = request.kpi_id.kpi_id.uuid
-            sampling_duration_s = request.sampling_duration_s
-            sampling_interval_s = request.sampling_interval_s
-            start_timestamp = request.start_timestamp.timestamp
-            end_timestamp = request.end_timestamp.timestamp
+            alarm_id = request.alarm_id.alarm_id.uuid
+            kpi_id = alarm_data[3]
+            kpiMinValue = alarm_data[4]
+            kpiMaxValue = alarm_data[5]
+            inRange = alarm_data[6]
+            includeMinValue = alarm_data[7]
+            includeMaxValue = alarm_data[8]
+            subscription_frequency_ms = request.subscription_frequency_ms
+            subscription_timeout_s = request.subscription_timeout_s
 
-            subscriber = "localhost"  # Investigate how to get info from the requester
+            end_timestamp = real_start_time + subscription_timeout_s
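+            # The stream terminates once subscription_timeout_s has elapsed since the request arrived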
 
-            subs_id = self.management_db.insert_subscription(kpi_id, subscriber, sampling_duration_s,
-                                                             sampling_interval_s, start_timestamp, end_timestamp)
-            self.subs_manager.create_subscription(subs_queue, subs_id, kpi_id, sampling_interval_s, sampling_duration_s,
-                                                  start_timestamp, end_timestamp)
+            self.alarm_manager.create_alarm(alarm_queue, alarm_id, kpi_id, kpiMinValue, kpiMaxValue, inRange,
+                                            includeMinValue, includeMaxValue, subscription_frequency_ms,
+                                            subscription_timeout_s)
 
-            # parse queue to append kpis into the list
             while True:
-                while not subs_queue.empty():
-                    subs_response = SubsResponse()
-                    list = subs_queue.get_nowait()
+                while not alarm_queue.empty():
+                    alarm_response = AlarmResponse()
+                    list = alarm_queue.get_nowait()
                     for item in list:
                         kpi = Kpi()
                         kpi.kpi_id.kpi_id.uuid = str(item[0])
                         kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
                         kpi.kpi_value.floatVal = item[2]  # This must be improved
-                        subs_response.kpi_list.kpi.append(kpi)
-                    subs_response.subs_id.subs_id.uuid = str(subs_id)
-                    yield subs_response
+                        alarm_response.kpi_list.kpi.append(kpi)
+                    alarm_response.alarm_id.alarm_id.uuid = alarm_id
+                    yield alarm_response
                 if timestamp_utcnow_to_float() > end_timestamp:
                     break
-            # yield subs_response
-        except ServiceException as e:
-            LOGGER.exception('SubscribeKpi exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('SubscribeKpi exception')
-
-    def GetSubsDescriptor(self, request: SubscriptionID, grpc_context: grpc.ServicerContext) -> SubsDescriptor:
-
-        LOGGER.info('GetSubsDescriptor')
-        try:
-            subs_id = request.subs_id.uuid
-            subs_db = self.management_db.get_subscription(int(request.subs_id.uuid))
-            response = SubsDescriptor()
-            if subs_db is None:
-                LOGGER.info('GetSubsDescriptor error: SubsID({:s}): not found in database'.format(str(subs_id)))
-            else:
-                LOGGER.debug(subs_db)
-                response.subs_id.subs_id.uuid = str(subs_db[0])
-                response.kpi_id.kpi_id.uuid = str(subs_db[1])
-                response.sampling_duration_s = subs_db[3]
-                response.sampling_interval_s = subs_db[4]
-                response.start_timestamp.timestamp = subs_db[5]
-                response.end_timestamp.timestamp = subs_db[6]
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('GetSubsDescriptor exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetSubsDescriptor exception')
-
-    def GetSubscriptions(self, request: Empty, grpc_context: grpc.ServicerContext) -> SubsList:
-
-        LOGGER.info('GetSubscriptions')
-        try:
-            response = SubsList()
-            data = self.management_db.get_subscriptions()
-
-            for subs_db in data:
-                subs_descriptor = SubsDescriptor()
-
-                subs_descriptor.subs_id.subs_id.uuid = str(subs_db[0])
-                subs_descriptor.kpi_id.kpi_id.uuid = str(subs_db[1])
-                subs_descriptor.sampling_duration_s = subs_db[3]
-                subs_descriptor.sampling_interval_s = subs_db[4]
-                subs_descriptor.start_timestamp.timestamp = subs_db[5]
-                subs_descriptor.end_timestamp.timestamp = subs_db[6]
-
-                response.subs_descriptor.append(subs_descriptor)
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('GetSubscriptions exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetSubscriptions exception')
-
-    def DeleteSubscription(self, request: SubscriptionID, grpc_context: grpc.ServicerContext) -> Empty:
-
-        LOGGER.info('DeleteSubscription')
-        try:
-            LOGGER.debug(f'DeleteSubscription with SubsID: {request.subs_id.uuid}')
-            subs_id = int(request.subs_id.uuid)
-            subs_db = self.management_db.get_subscription(int(request.subs_id.uuid))
-            if subs_db:
-                self.management_db.delete_subscription(subs_id)
-            else:
-                LOGGER.info('DeleteSubscription error: SubsID({:s}): not found in database'.format(str(subs_id)))
-            return Empty()
-        except ServiceException as e:
-            LOGGER.exception('DeleteSubscription exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('DeleteSubscription exception')
-
-    def SetKpiAlarm(self, request: AlarmDescriptor, grpc_context: grpc.ServicerContext) -> AlarmResponse:
-
-        LOGGER.info('SetKpiAlarm')
-        try:
-            response = AlarmID()
-
-            alarm_description = request.alarm_description
-            alarm_name = request.name
-            kpi_id = request.kpi_id.kpi_id.uuid
-            kpi_min_value = request.kpi_value_range.kpiMinValue.floatVal
-            kpi_max_value = request.kpi_value_range.kpiMaxValue.floatVal
-            in_range = request.kpi_value_range.inRange
-            include_min_value = request.kpi_value_range.includeMinValue
-            include_max_value = request.kpi_value_range.includeMaxValue
-            timestamp = request.timestamp.timestamp
-
-            LOGGER.debug(f"request.AlarmID: {request.alarm_id.alarm_id.uuid}")
-
-            if request.alarm_id.alarm_id.uuid != "":
-                alarm_id = request.alarm_id.alarm_id.uuid
-            #     Here the code to modify an existing alarm
-            else:
-                alarm_id = self.management_db.insert_alarm(alarm_description, alarm_name, kpi_id, kpi_min_value,
-                                                           kpi_max_value,
-                                                           in_range, include_min_value, include_max_value)
-                LOGGER.debug(f"AlarmID: {alarm_id}")
-            response.alarm_id.uuid = str(alarm_id)
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('SetKpiAlarm exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('SetKpiAlarm exception')
-
-    def GetAlarms(self, request: Empty, grpc_context: grpc.ServicerContext) -> AlarmList:
-
-        LOGGER.info('GetAlarms')
-        try:
-            response = AlarmList()
-            data = self.management_db.get_alarms()
-
-            for alarm in data:
-                alarm_descriptor = AlarmDescriptor()
-
-                alarm_descriptor.alarm_id.alarm_id.uuid = str(alarm[0])
-                alarm_descriptor.alarm_description = alarm[1]
-                alarm_descriptor.name = alarm[2]
-                alarm_descriptor.kpi_id.kpi_id.uuid = str(alarm[3])
-                alarm_descriptor.kpi_value_range.kpiMinValue.floatVal = alarm[4]
-                alarm_descriptor.kpi_value_range.kpiMaxValue.floatVal = alarm[5]
-                alarm_descriptor.kpi_value_range.inRange = bool(alarm[6])
-                alarm_descriptor.kpi_value_range.includeMinValue = bool(alarm[7])
-                alarm_descriptor.kpi_value_range.includeMaxValue = bool(alarm[8])
-
-                response.alarm_descriptor.append(alarm_descriptor)
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('GetAlarms exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetAlarms exception')
-
-    def GetAlarmDescriptor(self, request: AlarmID, grpc_context: grpc.ServicerContext) -> AlarmDescriptor:
-
-        LOGGER.info('GetAlarmDescriptor')
-        try:
-            alarm_id = request.alarm_id.uuid
-            LOGGER.debug(alarm_id)
-            alarm = self.management_db.get_alarm(alarm_id)
-            response = AlarmDescriptor()
-
-            if alarm:
-                LOGGER.debug(f"{alarm}")
-                response.alarm_id.alarm_id.uuid = str(alarm_id)
-                response.alarm_description = alarm[1]
-                response.name = alarm[2]
-                response.kpi_id.kpi_id.uuid = str(alarm[3])
-                response.kpi_value_range.kpiMinValue.floatVal = alarm[4]
-                response.kpi_value_range.kpiMaxValue.floatVal = alarm[5]
-                response.kpi_value_range.inRange = bool(alarm[6])
-                response.kpi_value_range.includeMinValue = bool(alarm[7])
-                response.kpi_value_range.includeMaxValue = bool(alarm[8])
-            else:
-                LOGGER.info('GetAlarmDescriptor error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
-                response.alarm_id.alarm_id.uuid = "NoID"
-            return response
-        except ServiceException as e:
-            LOGGER.exception('GetAlarmDescriptor exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetAlarmDescriptor exception')
-
-    def GetAlarmResponseStream(self, request: AlarmSubscription, grpc_context: grpc.ServicerContext) -> Iterator[
-        AlarmResponse]:
-
-        LOGGER.info('GetAlarmResponseStream')
-        try:
-            alarm_id = request.alarm_id.alarm_id.uuid
-            alarm_data = self.management_db.get_alarm(alarm_id)
-            real_start_time = timestamp_utcnow_to_float()
-
-            if alarm_data:
-                LOGGER.debug(f"{alarm_data}")
-                alarm_queue = Queue()
-
-                alarm_id = request.alarm_id.alarm_id.uuid
-                kpi_id = alarm_data[3]
-                kpiMinValue = alarm_data[4]
-                kpiMaxValue = alarm_data[5]
-                inRange = alarm_data[6]
-                includeMinValue = alarm_data[7]
-                includeMaxValue = alarm_data[8]
-                subscription_frequency_ms = request.subscription_frequency_ms
-                subscription_timeout_s = request.subscription_timeout_s
-
-                end_timestamp = real_start_time + subscription_timeout_s
-
-                self.alarm_manager.create_alarm(alarm_queue, alarm_id, kpi_id, kpiMinValue, kpiMaxValue, inRange,
-                                                includeMinValue, includeMaxValue, subscription_frequency_ms,
-                                                subscription_timeout_s)
-
-                while True:
-                    while not alarm_queue.empty():
-                        alarm_response = AlarmResponse()
-                        list = alarm_queue.get_nowait()
-                        size = len(list)
-                        for item in list:
-                            kpi = Kpi()
-                            kpi.kpi_id.kpi_id.uuid = str(item[0])
-                            kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
-                            kpi.kpi_value.floatVal = item[2]  # This must be improved
-                            alarm_response.kpi_list.kpi.append(kpi)
-                        alarm_response.alarm_id.alarm_id.uuid = alarm_id
-                        yield alarm_response
-                    if timestamp_utcnow_to_float() > end_timestamp:
-                        break
-            else:
-                LOGGER.info('GetAlarmResponseStream error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
-                alarm_response = AlarmResponse()
-                alarm_response.alarm_id.alarm_id.uuid = "NoID"
-                return alarm_response
-        except ServiceException as e:
-            LOGGER.exception('GetAlarmResponseStream exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetAlarmResponseStream exception')
+        else:
+            LOGGER.info('GetAlarmResponseStream error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
+            alarm_response = AlarmResponse()
+            alarm_response.alarm_id.alarm_id.uuid = "NoID"
+            yield alarm_response  # a plain 'return <value>' inside a generator would never deliver this message
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def DeleteAlarm(self, request: AlarmID, grpc_context: grpc.ServicerContext) -> Empty:
+        alarm_id = int(request.alarm_id.uuid)
+        alarm = self.management_db.get_alarm(alarm_id)
+        response = Empty()
+        if alarm:
+            self.alarm_manager.delete_alarm(alarm_id)
+            self.management_db.delete_alarm(alarm_id)
+        else:
+            LOGGER.info('DeleteAlarm error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
+        return response
 
-        LOGGER.info('DeleteAlarm')
-        try:
-            LOGGER.debug(f'DeleteAlarm with AlarmID: {request.alarm_id.uuid}')
-            alarm_id = int(request.alarm_id.uuid)
-            alarm = self.management_db.get_alarm(alarm_id)
-            response = Empty()
-            if alarm:
-                self.alarm_manager.delete_alarm(alarm_id)
-                self.management_db.delete_alarm(alarm_id)
-            else:
-                LOGGER.info('DeleteAlarm error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
-            return response
-        except ServiceException as e:
-            LOGGER.exception('DeleteAlarm exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('DeleteAlarm exception')
-
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetStreamKpi(self, request: KpiId, grpc_context: grpc.ServicerContext) -> Iterator[Kpi]:
-
-        LOGGER.info('GetStreamKpi')
-
         kpi_id = request.kpi_id.uuid
         kpi_db = self.management_db.get_KPI(int(kpi_id))
         response = Kpi()
@@ -581,36 +420,23 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         else:
             yield response
 
-    @MONITORING_GETINSTANTKPI_REQUEST_TIME.time()
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetInstantKpi(self, request: KpiId, grpc_context: grpc.ServicerContext) -> Kpi:
-
-        LOGGER.info('GetInstantKpi')
-        try:
-            kpi_id = request.kpi_id.uuid
-            response = Kpi()
-            if kpi_id == "":
-                LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
-                response.kpi_id.kpi_id.uuid = "NoID"
+        kpi_id = request.kpi_id.uuid
+        response = Kpi()
+        if kpi_id == "":
+            LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+            response.kpi_id.kpi_id.uuid = "NoID"
+        else:
+            query = f"SELECT kpi_id, timestamp, kpi_value FROM {METRICSDB_TABLE_MONITORING_KPIS} " \
+                    f"WHERE kpi_id = '{kpi_id}' LATEST ON timestamp PARTITION BY kpi_id"
+            data = self.metrics_db.run_query(query)
+            LOGGER.debug(data)
+            if len(data) == 0:
+                response.kpi_id.kpi_id.uuid = request.kpi_id.uuid
             else:
-                query = f"SELECT kpi_id, timestamp, kpi_value FROM {METRICSDB_TABLE_MONITORING_KPIS} " \
-                        f"WHERE kpi_id = '{kpi_id}' LATEST ON timestamp PARTITION BY kpi_id"
-                data = self.metrics_db.run_query(query)
-                LOGGER.debug(data)
-                if len(data) == 0:
-                    response.kpi_id.kpi_id.uuid = request.kpi_id.uuid
-                else:
-                    _data = data[0]
-                    response.kpi_id.kpi_id.uuid = str(_data[0])
-                    response.timestamp.timestamp = timestamp_string_to_float(_data[1])
-                    response.kpi_value.floatVal = _data[2]
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('GetInstantKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetInstantKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
-
+                _data = data[0]
+                response.kpi_id.kpi_id.uuid = str(_data[0])
+                response.timestamp.timestamp = timestamp_string_to_float(_data[1])
+                response.kpi_value.floatVal = _data[2]
+        return response
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index c883f9d141fc28645761641b0ccd10294b538bd2..4e84431a5438e1536c92ca644bd5005deba545a4 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -25,7 +25,6 @@ from grpc._channel import _MultiThreadedRendezvous
 from common.Constants import ServiceNameEnum
 from common.Settings import (
     ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
-#from common.logger import getJSONLogger
 from common.proto.context_pb2 import DeviceOperationalStatusEnum, EventTypeEnum, DeviceEvent, Device, Empty
 from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
diff --git a/src/pathcomp/backend/Makefile b/src/pathcomp/backend/Makefile
index 058701098308620cd1a71f3718b934c63646d42a..56d249510497785316c2e0c36ea8ee0e28c461b1 100644
--- a/src/pathcomp/backend/Makefile
+++ b/src/pathcomp/backend/Makefile
@@ -30,16 +30,16 @@ coverage: CFLAGS  += -O0 -ggdb -g -DDEBUG -fprofile-arcs -ftest-coverage -DGCOV
 coverage: LDFLAGS += -g -lgcov --coverage -fprofile-arcs -ftest-coverage -DGCOV
 coverage: pathComp-cvr
 
-pathComp: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o 
-	gcc -o pathComp pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o \
+pathComp: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o 
+	gcc -o pathComp pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o \
 		-L/usr/lib/x86_64-linux-gnu/ -lglib-2.0 -luuid $(LDFLAGS) $(LDLIBS)
 
-pathComp-dbg: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o 
-	gcc -o pathComp-dbg pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o \
+pathComp-dbg: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o 
+	gcc -o pathComp-dbg pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o \
 		-L/usr/lib/x86_64-linux-gnu/ -lglib-2.0 -luuid $(LDFLAGS) $(LDLIBS)
 
-pathComp-cvr: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o 
-	gcc -o pathComp-cvr pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o \
+pathComp-cvr: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o 
+	gcc -o pathComp-cvr pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o \
 		-L/usr/lib/x86_64-linux-gnu/ -lglib-2.0 -luuid $(LDFLAGS) $(LDLIBS)
 
 pathComp_log.o: pathComp_log.h pathComp_log.c
@@ -56,8 +56,11 @@ pathComp_ksp.o: pathComp_log.h pathComp_tools.h pathComp_ksp.h pathComp_ksp.c
 
 pathComp_sp.o: pathComp_log.h pathComp_tools.h pathComp_sp.h pathComp_sp.c
 	$(CC) $(CFLAGS) -c pathComp_sp.c -o pathComp_sp.o
+
+pathComp_ear.o: pathComp_log.h pathComp_tools.h pathComp_ear.h pathComp_ear.c
+	$(CC) $(CFLAGS) -c pathComp_ear.c -o pathComp_ear.o
 	
-pathComp_RESTapi.o: pathComp_tools.h pathComp_log.h pathComp_cjson.h pathComp_ksp.h pathComp_sp.h pathComp_RESTapi.h pathComp_RESTapi.c
+pathComp_RESTapi.o: pathComp_tools.h pathComp_log.h pathComp_cjson.h pathComp_ksp.h pathComp_sp.h pathComp_ear.h pathComp_RESTapi.h pathComp_RESTapi.c
 	$(CC) $(CFLAGS) -c pathComp_RESTapi.c -o pathComp_RESTapi.o
 
 pathComp.o: pathComp_log.h pathComp_RESTapi.h pathComp.c pathComp.h
diff --git a/src/pathcomp/backend/pathComp.c b/src/pathcomp/backend/pathComp.c
index aa6c2b7341862a0115581abee7f977edabe93126..537cf378f1d6124ebc7c2a0140c0a408af547254 100644
--- a/src/pathcomp/backend/pathComp.c
+++ b/src/pathcomp/backend/pathComp.c
@@ -48,10 +48,8 @@ void my_gcov_handler(int signum)
 
 // External Variables
 FILE *logfile = NULL;
-
 // PATH COMP IP address API Client
 struct in_addr pathComp_IpAddr;
-
 // REST API ENABLED
 int RESTAPI_ENABLED = 0;
 
diff --git a/src/pathcomp/backend/pathComp_RESTapi.c b/src/pathcomp/backend/pathComp_RESTapi.c
index 8ee7f6d82537b7eab297334a0a0dcfad13f8af44..1780cfde2039b5907ab0f5696885e17deb56644c 100644
--- a/src/pathcomp/backend/pathComp_RESTapi.c
+++ b/src/pathcomp/backend/pathComp_RESTapi.c
@@ -36,6 +36,7 @@
 #include "pathComp_cjson.h"
 #include "pathComp_ksp.h"
 #include "pathComp_sp.h"
+#include "pathComp_ear.h"
 #include "pathComp_RESTapi.h"
 
 #define ISspace(x) isspace((int)(x))
@@ -50,9 +51,10 @@ guint CLIENT_ID = 0;
 guint32 paId_req = 0;
 
 // Global variables
-struct linkList_t* linkList;
-struct deviceList_t* deviceList;
-struct serviceList_t* serviceList;
+GList* linkList;
+GList* deviceList;
+GList* serviceList;
+GList* activeServList;
 
 gchar algId[MAX_ALG_ID_LENGTH];
 gboolean syncPath = FALSE;
@@ -78,8 +80,9 @@ gint find_rl_client_by_fd (gconstpointer data, gconstpointer userdata)
 	 struct pathComp_client *client = (struct pathComp_client*)data;
      gint fd = *(gint *)userdata; 
      
-    if (client->fd == fd)	
-		return 0;        
+	 if (client->fd == fd) {
+		 return 0;
+	 }
     return -1;	
 }
 
@@ -454,7 +457,6 @@ void rapi_response_json_contents (char *body, gint *length, struct compRouteOutp
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void rapi_response_ok (GIOChannel *source, gint httpCode, struct compRouteOutputList_t *compRouteOutputList) {
-    
 	gint ret = 0;
     
     //DEBUG_PC ("Creating the JSON Body and sending the response of the computed Route List");
@@ -526,8 +528,7 @@ void rapi_response_ok (GIOChannel *source, gint httpCode, struct compRouteOutput
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void rapi_response (GIOChannel *source, gint error)
-{
+void rapi_response (GIOChannel *source, gint error) {
 	 int ret = 0;	
 	 guchar buftmp[1024];
 	 char * buf = g_malloc0 (sizeof (char) * 2048000);
@@ -566,11 +567,94 @@ void rapi_response (GIOChannel *source, gint error)
 	 return;
 }
 
+///////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief parsing topology Identifier Object (contains Context Id and Topology UUID) JSON object
+ *
+ * 	@param obj
+ *  @param topology_id
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void parse_topology_Id(cJSON* obj, struct topology_id_t* topology_id) {
+	g_assert(topology_id);
+	// Get the context Id (UUID) from the topologyIdObj
+	cJSON* contextIdObj = cJSON_GetObjectItem(obj, "contextId");
+	if (cJSON_IsString(contextIdObj)) {
+		duplicate_string(topology_id->contextId, contextIdObj->valuestring);
+	}
+	// Get the topologyId (UUID) from the topologyIdObj
+	cJSON* topologyUuidObj = cJSON_GetObjectItem(obj, "topology_uuid");
+	if (cJSON_IsString(topologyUuidObj)) {
+		duplicate_string(topology_id->topology_uuid, topologyUuidObj->valuestring);
+	}
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief parsing EndpointIds JSON object
+ *
+ * 	@param item
+ *  @param serviceEndPointId
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void parse_endPointsIds(cJSON* item, struct service_endpoints_id_t* serviceEndPointId) {
+	// Get the topology Id Object
+	cJSON* topologyIdObj = cJSON_GetObjectItem(item, "topology_id");
+	if (cJSON_IsObject(topologyIdObj)) {
+		parse_topology_Id(topologyIdObj, &serviceEndPointId->topology_id);
+	}
+	// Get the deviceId (UUID)
+	cJSON* deviceIdObj = cJSON_GetObjectItem(item, "device_id");
+	if (cJSON_IsString(deviceIdObj)) {
+		duplicate_string(serviceEndPointId->device_uuid, deviceIdObj->valuestring);
+		DEBUG_PC("DeviceId: %s", serviceEndPointId->device_uuid);
+	}
+	// Get the endpointId (UUID)
+	cJSON* endPointIdObj = cJSON_GetObjectItem(item, "endpoint_uuid");
+	if (cJSON_IsString(endPointIdObj)) {
+		duplicate_string(serviceEndPointId->endpoint_uuid, endPointIdObj->valuestring);
+		DEBUG_PC("EndPointId: %s", serviceEndPointId->endpoint_uuid);
+	}
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief Function used to parse the array of Endpoint Ids of the active services
+ *
+ * 	@param endPointIdArray
+ *  @param actServ
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void parse_act_service_endPointsIds_array(cJSON* endPointIdArray, struct activeService_t* actServ) {
+	g_assert(actServ);
+
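+	// NOTE: assumes actServ->service_endpoints_id has capacity for every element of the JSON array (no bounds check)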
+	for (gint i = 0; i < cJSON_GetArraySize(endPointIdArray); i++) {
+		actServ->num_service_endpoints_id++;
+		struct service_endpoints_id_t* serviceEndPointId = &(actServ->service_endpoints_id[i]);
+		cJSON* item = cJSON_GetArrayItem(endPointIdArray, i);
+		parse_endPointsIds(item, serviceEndPointId);
+	}
+	return;
+}
 
 ///////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_RESTapi.c
- * 	@brief Function used to parse the array of Endpoint Ids
+ * 	@brief Function used to parse the array of Endpoint Ids of the requested services
  *
  * 	@param endPointArray
  *  @param s
@@ -586,35 +670,7 @@ void parse_service_endPointsIds_array(cJSON* endPointIdArray, struct service_t*
 		struct service_endpoints_id_t* serviceEndPointId = &(s->service_endpoints_id[i]);
 
 		cJSON* item = cJSON_GetArrayItem(endPointIdArray, i);
-
-		// Get the topology Id Object
-		cJSON* topologyIdObj = cJSON_GetObjectItem(item, "topology_id");
-		if (cJSON_IsObject(topologyIdObj)) {
-			// Get the context Id (UUID) from the topologyIdObj
-			cJSON* contextIdObj = cJSON_GetObjectItem(topologyIdObj, "contextId");
-			if (cJSON_IsString(contextIdObj)) {					
-				duplicate_string(serviceEndPointId->topology_id.contextId, contextIdObj->valuestring);
-				//DEBUG_PC("Service EndPoint [%d]-- ContextId: %s (uuid string format)", i + 1, serviceEndPointId->topology_id.contextId);
-			}
-			// Get the topologyId (UUID) from the topologyIdObj
-			cJSON* topologyUuidObj = cJSON_GetObjectItem(topologyIdObj, "topology_uuid");
-			if (cJSON_IsString(topologyUuidObj)) {				
-				duplicate_string(serviceEndPointId->topology_id.topology_uuid, topologyUuidObj->valuestring);
-				//DEBUG_PC("Service Endpoint (%d) -- TopologyId: %s (uuid string format)", i + 1, serviceEndPointId->topology_id.topology_uuid);
-			}			
-		}
-		// Get the deviceId (UUID)
-		cJSON* deviceIdObj = cJSON_GetObjectItem(item, "device_id");
-		if (cJSON_IsString(deviceIdObj)) {			
-			duplicate_string(serviceEndPointId->device_uuid, deviceIdObj->valuestring);
-			DEBUG_PC("[%d] - DeviceId: %s", i + 1, serviceEndPointId->device_uuid);
-		}
-		// Get the endpointId (UUID)
-		cJSON* endPointIdObj = cJSON_GetObjectItem(item, "endpoint_uuid");
-		if (cJSON_IsString(endPointIdObj)) {
-			duplicate_string(serviceEndPointId->endpoint_uuid, endPointIdObj->valuestring);
-			DEBUG_PC("[%d] EndPointId: %s", i + 1, serviceEndPointId->endpoint_uuid);
-		}		
+		parse_endPointsIds(item, serviceEndPointId);
 	}
 	return;
 }
@@ -632,11 +688,8 @@ void parse_service_endPointsIds_array(cJSON* endPointIdArray, struct service_t*
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void parse_service_constraints(cJSON* constraintArray, struct service_t* s) {
-
 	for (gint i = 0; i < cJSON_GetArraySize(constraintArray); i++) {
-
 		s->num_service_constraints++;
-
 		struct constraint_t* constraint = &(s->constraints[i]);
 
 		cJSON* item = cJSON_GetArrayItem(constraintArray, i);
@@ -656,6 +709,38 @@ void parse_service_constraints(cJSON* constraintArray, struct service_t* s) {
 	return;
 }
 
+///////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief Function used to parse the serviceId information from a JSON obj
+ *
+ * 	@param obj
+ *  @param serviceId
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void parse_json_serviceId(cJSON* obj, struct serviceId_t* serviceId) {
+	g_assert(obj);
+	g_assert(serviceId);
+
+	// Get context Id uuid
+	cJSON* contextIdObj = cJSON_GetObjectItem(obj, "contextId");
+	if (cJSON_IsString(contextIdObj)) {
+		// copy the contextId UUID string from contextIdObj->valuestring
+		duplicate_string(serviceId->contextId, contextIdObj->valuestring);
+		DEBUG_PC("ContextId: %s (uuid string format)", serviceId->contextId);
+	}
+	// Get service Id uuid
+	cJSON* serviceUuidObj = cJSON_GetObjectItem(obj, "service_uuid");
+	if (cJSON_IsString(serviceUuidObj)) {
+		duplicate_string(serviceId->service_uuid, serviceUuidObj->valuestring);
+		DEBUG_PC("Service UUID: %s (uuid string format)", serviceId->service_uuid);
+	}
+	return;
+}
+
 ///////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_RESTapi.c
@@ -672,15 +757,15 @@ void parsing_json_serviceList_array(cJSON* serviceArray) {
 
 	for (gint i = 0; i < cJSON_GetArraySize(serviceArray); i++)
 	{
-		serviceList->numServiceList++;
-		struct service_t* service = &(serviceList->services[i]); 
-
+		struct service_t* service = g_malloc0(sizeof(struct service_t));
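+		// NOTE: g_malloc0() aborts the program on allocation failure, so the NULL check below is purely defensive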
+		if (service == NULL) {
+			DEBUG_PC("Memory allocation error ...");
+			exit(-1);
+		}
 		cJSON* item = cJSON_GetArrayItem(serviceArray, i);
-		
 		// Get the algorithm Id
 		cJSON* algIdItem = cJSON_GetObjectItem(item, "algId");
-		if (cJSON_IsString(algIdItem))
-		{
+		if (cJSON_IsString(algIdItem)) {
 			duplicate_string(service->algId, algIdItem->valuestring);
 			DEBUG_PC ("algId: %s", service->algId);
 			// assumed that all the services request the same algId
@@ -689,16 +774,13 @@ void parsing_json_serviceList_array(cJSON* serviceArray) {
 
 		// Get the syncPaths
 		cJSON* synchPathObj = cJSON_GetObjectItemCaseSensitive(item, "syncPaths");
-		if (cJSON_IsBool(synchPathObj))
-		{
+		if (cJSON_IsBool(synchPathObj)) {
 			// Check Synchronization of multiple Paths to attain e.g. global concurrent optimization
-			if (cJSON_IsTrue(synchPathObj))
-			{
+			if (cJSON_IsTrue(synchPathObj)) {
 				syncPath = TRUE;
 				DEBUG_PC("Path Synchronization is required");
 			}
-			if (cJSON_IsFalse(synchPathObj))
-			{
+			if (cJSON_IsFalse(synchPathObj)) {
 				syncPath = FALSE;
 				DEBUG_PC("No Path Synchronization");
 			}
@@ -707,19 +789,7 @@ void parsing_json_serviceList_array(cJSON* serviceArray) {
 		// Get service Id in terms of contextId and service uuids
 		cJSON* serviceIdObj = cJSON_GetObjectItem(item, "serviceId");
 		if (cJSON_IsObject(serviceIdObj)) {
-			// Get context Id uuid
-			cJSON* contextIdObj = cJSON_GetObjectItem(serviceIdObj, "contextId");
-			if (cJSON_IsString(contextIdObj)) {
-				// convert the string in contextId->valuestring in uuid binary format
-				duplicate_string(service->serviceId.contextId, contextIdObj->valuestring);
-				DEBUG_PC("ContextId: %s (uuid string format)", service->serviceId.contextId);
-			}
-			// Get service Id uuid
-			cJSON* serviceUuidObj = cJSON_GetObjectItem(serviceIdObj, "service_uuid");
-			if (cJSON_IsString(serviceUuidObj)) {				
-				duplicate_string(service->serviceId.service_uuid, serviceUuidObj->valuestring);
-				DEBUG_PC("Service UUID: %s (uuid string format)", service->serviceId.service_uuid);
-			}				
+			parse_json_serviceId(serviceIdObj, &service->serviceId);
 		}		
 
 		// Get de service type
@@ -747,6 +817,9 @@ void parsing_json_serviceList_array(cJSON* serviceArray) {
 		if (cJSON_IsNumber(kPathsObj)){
 			service->kPaths = (guint)(kPathsObj->valuedouble);
 		}
+
+		// Append the requested service to the serviceList
+		serviceList = g_list_append(serviceList, service);
 	}
 	return;
 }
@@ -764,7 +837,6 @@ void parsing_json_serviceList_array(cJSON* serviceArray) {
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void parse_capacity_object(cJSON* capacity, struct capacity_t* c) {
-
 	cJSON* totalSizeObj = cJSON_GetObjectItem(capacity, "total-size");
 	if (cJSON_IsObject(totalSizeObj)) {
 		//Get the capacity value
@@ -794,7 +866,6 @@ void parse_capacity_object(cJSON* capacity, struct capacity_t* c) {
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void parse_json_device_endpoints_array(cJSON* endPointsArray, struct device_t* d) {
-
 	for (gint i = 0; i < cJSON_GetArraySize(endPointsArray); i++) {
 		d->numEndPoints++;
 		struct endPoint_t* endpoint = &(d->endPoints[i]);
@@ -807,30 +878,17 @@ void parse_json_device_endpoints_array(cJSON* endPointsArray, struct device_t* d
 			// Get the topology Id Object
 			cJSON* topologyIdObj = cJSON_GetObjectItem(endPointIdObj, "topology_id");
 			if (cJSON_IsObject(topologyIdObj)) {
-				// Get the context Id (UUID) from the topologyIdObj
-				cJSON* contextIdObj = cJSON_GetObjectItem(topologyIdObj, "contextId");
-				if (cJSON_IsString(contextIdObj)) {
-					duplicate_string(endpoint->endPointId.topology_id.contextId, contextIdObj->valuestring);
-					//DEBUG_PC("Device EndPoint (%d)-- ContextId: %s (uuid string format)", i + 1, endpoint->endPointId.topology_id.contextId);
-				}
-				// Get the topologyId (UUID) from the topologyIdObj
-				cJSON* topologyUuidObj = cJSON_GetObjectItem(topologyIdObj, "topology_uuid");
-				if (cJSON_IsString(topologyUuidObj)) {					
-					duplicate_string(endpoint->endPointId.topology_id.topology_uuid, topologyUuidObj->valuestring);
-					//DEBUG_PC("Device Endpoint (%d) -- TopologyId: %s (uuid string format)", i + 1, endpoint->endPointId.topology_id.topology_uuid);
-				}
+				parse_topology_Id(topologyIdObj, &endpoint->endPointId.topology_id);
 			}
 			// Get the deviceId
 			cJSON* deviceIdObj = cJSON_GetObjectItem(endPointIdObj, "device_id");
 			if (cJSON_IsString(deviceIdObj)) {				
 				duplicate_string(endpoint->endPointId.device_id, deviceIdObj->valuestring);
-				//DEBUG_PC("Device Endpoint (%d) -- Device Id: %s (uuid)", i + 1, endpoint->endPointId.device_id);
 			}
 			// Get the endpoint_uuid
 			cJSON* endPointUuidObj = cJSON_GetObjectItem(endPointIdObj, "endpoint_uuid");
 			if (cJSON_IsString(endPointUuidObj)) {				
 				duplicate_string(endpoint->endPointId.endpoint_uuid, endPointUuidObj->valuestring);
-				//DEBUG_PC("Device Endpoint (%d) -- EndPoint Uuid: %s (uuid)", i + 1, endpoint->endPointId.endpoint_uuid);
 			}
 		}
 		// Get the EndPoint Type
@@ -889,6 +947,20 @@ void parse_json_device_endpoints_array(cJSON* endPointsArray, struct device_t* d
 				//DEBUG_PC("Inter-Domain Remote Id: %s", endpoint->inter_domain_plug_in.inter_domain_plug_in_remote_id);
 			}
 		}
+
+		// Energy consumption per endPoint port
+		cJSON* energyPortObj = cJSON_GetObjectItem(item, "energy_consumption");
+		if (cJSON_IsNumber(energyPortObj)) {
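+			// cJSON stores numeric values as double; copy the raw bytes into the gdouble field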
+			memcpy(&endpoint->energyConsumption, &energyPortObj->valuedouble, sizeof(gdouble));
+			DEBUG_PC("Endpoint Energy Consumption: %f", endpoint->energyConsumption);
+		}
+
+		// Endpoint Operational Status
+		cJSON* operationalStatusObj = cJSON_GetObjectItem(item, "operational_status");
+		if (cJSON_IsNumber(operationalStatusObj)) {
+			endpoint->operational_status = (gint)(operationalStatusObj->valuedouble);
+			DEBUG_PC("Endpoint Operational Status: %d", endpoint->operational_status);
+		}
 	}
 	return;
 }
@@ -907,11 +979,28 @@ void parse_json_device_endpoints_array(cJSON* endPointsArray, struct device_t* d
 void parsing_json_deviceList_array(cJSON* deviceArray) {
 	DEBUG_PC("");
 	DEBUG_PC("========= PARSING DEVICE LIST ============");
-	for (gint i = 0; i < cJSON_GetArraySize(deviceArray); i++) {
-		deviceList->numDevices++;
-		struct device_t* d = &(deviceList->devices[i]);
+	for (gint i = 0; i < cJSON_GetArraySize(deviceArray); i++) {		
+		struct device_t* d = g_malloc0(sizeof(struct device_t));
+		if (d == NULL) {
+			DEBUG_PC("Memory Allocation Failure");
+			exit(-1);
+		}
 		cJSON* item = cJSON_GetArrayItem(deviceArray, i);
 
+		// Get the power idle of the switch
+		cJSON* powerIdleObj = cJSON_GetObjectItem(item, "power_idle");
+		if (cJSON_IsNumber(powerIdleObj)) {
+			memcpy(&d->power_idle, &powerIdleObj->valuedouble, sizeof(gdouble));
+			DEBUG_PC("Power Idle: %f", d->power_idle);
+		}
+
+		// Get the operational state
+		cJSON* opeStatusObj = cJSON_GetObjectItem(item, "operational_status");
+		if (cJSON_IsNumber(opeStatusObj)) {
+			d->operational_status = (gint)(opeStatusObj->valuedouble);
+			DEBUG_PC("Operational Status: %d (0 Undefined, 1 Disabled, 2 Enabled)", d->operational_status);
+		}
+
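+		// Illustrative device entry consumed by this loop; the keys match the cJSON
+		// lookups in this function, while the values are purely hypothetical:
+		//   { "device_Id": "...", "power_idle": 85.0, "operational_status": 2, ... }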
 		// Get the device UUID
 		cJSON* deviceUuidObj = cJSON_GetObjectItem(item, "device_Id");
 		if (cJSON_IsString(deviceUuidObj)) {
@@ -932,6 +1021,8 @@ void parsing_json_deviceList_array(cJSON* deviceArray) {
 		if (cJSON_IsArray(deviceEndpointsArray)) {
 			parse_json_device_endpoints_array(deviceEndpointsArray, d);
 		}
+		// append the device into the deviceList
+		deviceList = g_list_append(deviceList, d);
 	}
 	return;
 }
@@ -949,7 +1040,6 @@ void parsing_json_deviceList_array(cJSON* deviceArray) {
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void parse_json_link_endpoints_array(cJSON *endPointsLinkObj, struct link_t* l) {
-
 	for (gint i = 0; i < cJSON_GetArraySize(endPointsLinkObj); i++) {
 		//DEBUG_PC("link: %s has %d endPointIds", l->linkId, l->numLinkEndPointIds);
 		l->numLinkEndPointIds++;
@@ -963,18 +1053,7 @@ void parse_json_link_endpoints_array(cJSON *endPointsLinkObj, struct link_t* l)
 			// Get the topology Id Object
 			cJSON* topologyIdObj = cJSON_GetObjectItem(endPointIdObj, "topology_id");
 			if (cJSON_IsObject(topologyIdObj)) {
-				// Get the context Id (UUID) from the topologyIdObj
-				cJSON* contextIdObj = cJSON_GetObjectItem(topologyIdObj, "contextId");
-				if (cJSON_IsString(contextIdObj)) {					
-					duplicate_string(endPointLink->topology_id.contextId, contextIdObj->valuestring);
-					//DEBUG_PC("Link EndPoint (%d)-- ContextId: %s (uuid string format)", i + 1, endPointLink->topology_id.contextId);
-				}
-				// Get the topologyId (UUID) from the topologyIdObj
-				cJSON* topologyUuidObj = cJSON_GetObjectItem(topologyIdObj, "topology_uuid");
-				if (cJSON_IsString(topologyUuidObj)) {
-					duplicate_string(endPointLink->topology_id.topology_uuid, topologyUuidObj->valuestring);
-					//DEBUG_PC("Link Endpoint (%d) -- TopologyId: %s (uuid string format)", i + 1, endPointLink->topology_id.topology_uuid);
-				}
+				parse_topology_Id(topologyIdObj, &endPointLink->topology_id);				
 			}
 			// Get the deviceId
 			cJSON* deviceIdObj = cJSON_GetObjectItem(endPointIdObj, "device_id");
@@ -1006,21 +1085,23 @@ void parse_json_link_endpoints_array(cJSON *endPointsLinkObj, struct link_t* l)
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void parsing_json_linkList_array(cJSON* linkListArray) {
-
 	DEBUG_PC("");
 	DEBUG_PC("======= PARSING OF THE LINK LIST ARRAY ==========");
-	for (gint i = 0; i < cJSON_GetArraySize(linkListArray); i++) {
-		linkList->numLinks++;
-		struct link_t* l = &(linkList->links[i]);
-		//l->numLinkEndPointIds = 0;
-
+	for (gint i = 0; i < cJSON_GetArraySize(linkListArray); i++) {		
+		struct link_t* l = g_malloc0(sizeof(struct link_t));
+		if (l == NULL) {
+			DEBUG_PC("Memory Allocation Failure");
+			exit(-1);
+		}
 		cJSON* item = cJSON_GetArrayItem(linkListArray, i);
+
 		// Get the link Id (uuid)
 		cJSON* linkIdObj = cJSON_GetObjectItem(item, "link_Id");
 		if (cJSON_IsString(linkIdObj)) {
 			duplicate_string(l->linkId, linkIdObj->valuestring);
 			DEBUG_PC(" * Link (%d) -- Id: %s (uuid)", i + 1, l->linkId);
 		}
+
 		// Get the link endpoints (assumed to be p2p)
 		cJSON* endPointsLinkObj = cJSON_GetObjectItem(item, "link_endpoint_ids");
 		if (cJSON_IsArray(endPointsLinkObj)) {
@@ -1083,6 +1164,7 @@ void parsing_json_linkList_array(cJSON* linkListArray) {
 				//DEBUG_PC("Link (%d) -- Latency: %f", i + 1, l->latency_characteristics.fixed_latency);
 			}	
 		}
+		linkList = g_list_append(linkList, l);
 	}
 	return;
 }
@@ -1100,15 +1182,22 @@ void parsing_json_linkList_array(cJSON* linkListArray) {
 void generate_reverse_linkList() {
 	DEBUG_PC("");
 	DEBUG_PC("CREATION OF REVERSE LINKS");
-	gint numLinks = linkList->numLinks;
-	
-	for (gint i = 0; i < numLinks; i++) {
-		struct link_t* refLink = &(linkList->links[i]);
-		struct link_t* newLink = &(linkList->links[numLinks + i]);
-		linkList->numLinks++;
+	gint numLinks = g_list_length (linkList);
+	DEBUG_PC("Initial Number of links in the main List: %d", numLinks);
+	gint i = 0;
+	for (GList* ln = g_list_first(linkList);
+		(ln) && (i < numLinks);
+		ln = g_list_next(ln), i++)
+	{
+		struct link_t* refLink = (struct link_t*)(ln->data);
+		struct link_t* newLink = g_malloc0(sizeof(struct link_t));
+		if (newLink == NULL) {
+			DEBUG_PC("Memory Allocation Failure");
+			exit(-1);
+		}
 		// Copy the linkId + appending "_rev"
 		duplicate_string(newLink->linkId, refLink->linkId);
-		strcat(newLink->linkId, "_rev");
+		strcat(newLink->linkId, "_rev"); 		
 
 		//DEBUG_PC("refLink: %s // newLink: %s", refLink->linkId, newLink->linkId);
 
@@ -1121,7 +1210,7 @@ void generate_reverse_linkList() {
 			exit(-1);
 		}
 #endif
-		DEBUG_PC(" * Link[%d] -- Id: %s", numLinks + i, newLink->linkId);
+		//DEBUG_PC(" * Link[%d] -- Id: %s", numLinks + i, newLink->linkId);
 
 		//DEBUG_PC("Number of Endpoints in Link: %d", refLink->numLinkEndPointIds);
 		for (gint j = refLink->numLinkEndPointIds - 1, m = 0; j >= 0; j--, m++) {			
@@ -1131,9 +1220,9 @@ void generate_reverse_linkList() {
 			duplicate_string(newEndPId->topology_id.contextId, refEndPId->topology_id.contextId);
 			duplicate_string(newEndPId->topology_id.topology_uuid, refEndPId->topology_id.topology_uuid);
 			//duplicate the deviceId and endPoint_uuid
-			duplicate_string(newEndPId->deviceId, refEndPId->deviceId);
-			duplicate_string(newEndPId->endPointId, refEndPId->endPointId);
-			DEBUG_PC("refLink Endpoint[%d]: %s(%s)", j, refEndPId->deviceId, refEndPId->endPointId);
+			duplicate_string(newEndPId->deviceId, refEndPId->endPointId);
+			duplicate_string(newEndPId->endPointId, refEndPId->deviceId);
+			//DEBUG_PC("refLink Endpoint[%d]: %s(%s)", j, refEndPId->deviceId, refEndPId->endPointId);
 			//DEBUG_PC("newLink Endpoint[%d]: %s(%s)", m, newEndPId->deviceId, newEndPId->endPointId);
 			newLink->numLinkEndPointIds++;
 		}
@@ -1155,11 +1244,87 @@ void generate_reverse_linkList() {
 
 		// duplicate latency characteristics
 		memcpy(&newLink->latency_characteristics.fixed_latency, &refLink->latency_characteristics.fixed_latency, sizeof(gdouble));
+		// Append the newly created reverse link to the linkList
+		linkList = g_list_append(linkList, newLink);
 	}
-	DEBUG_PC("Terminating Reverse Links [total: %d]", linkList->numLinks);
+	DEBUG_PC("Terminating Reverse Links [total links: %d]", g_list_length(linkList));
 	return;
 }
 
+///////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief Function used to parse the JSON object/s for active services
+ *
+ * 	@param actServiceArray
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
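+// Illustrative entry of the "activeServList" array handled below; the keys match
+// the cJSON lookups in this function, while the values are purely hypothetical:
+//   { "serviceId": { ... }, "serviceType": 1,
+//     "service_endpoints_ids": [ ... ],
+//     "devices": [ { "topology_id": { ... }, "device_id": "...", "endpoint_uuid": "..." } ] }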
+void parsing_json_activeService_array(cJSON* actServiceArray) {
+	DEBUG_PC("");
+	DEBUG_PC("====== PARSING THE JSON CONTENTS OF THE ACTIVE SERVICES =======");
+	
+	for (gint i = 0; i < cJSON_GetArraySize(actServiceArray); i++) {
+		struct activeService_t* actServ = g_malloc0(sizeof(struct activeService_t));
+		if (actServ == NULL) {
+			DEBUG_PC("Memory Allocation Failure");
+			exit(-1);
+		}
+		cJSON* item = cJSON_GetArrayItem(actServiceArray, i);
+		// ServiceId
+		cJSON* serviceIdObj = cJSON_GetObjectItem(item, "serviceId");
+		if (cJSON_IsObject(serviceIdObj)) {
+			parse_json_serviceId(serviceIdObj, &actServ->serviceId);
+		}
+		// Service Type
+		cJSON* serviceTypeObj = cJSON_GetObjectItem(item, "serviceType");
+		if (cJSON_IsNumber(serviceTypeObj))
+		{
+			actServ->service_type = (guint)(serviceTypeObj->valuedouble);
+			print_service_type(actServ->service_type);
+		}
+		// Service Endpoints
+		cJSON* endPointIdsArray = cJSON_GetObjectItem(item, "service_endpoints_ids");
+		if (cJSON_IsArray(endPointIdsArray)) {
+			parse_act_service_endPointsIds_array(endPointIdsArray, actServ);
+		}
+		// Parsing the active service path
+		actServ->activeServPath = NULL;
+		cJSON* actServPathArray = cJSON_GetObjectItem(item, "devices");
+		if (cJSON_IsArray(actServPathArray)) {
+			for (gint j = 0; j < cJSON_GetArraySize(actServPathArray); j++) {
+				struct activeServPath_t* actServPath = g_malloc0(sizeof(struct activeServPath_t));
+				if (actServPath == NULL) {
+					DEBUG_PC("Memory Allocation Failure");
+					exit(-1);
+				}
+				cJSON* item2 = cJSON_GetArrayItem(actServPathArray, j);
+				// Topology Id
+				cJSON* topologyIdObj = cJSON_GetObjectItem(item2, "topology_id");
+				if (cJSON_IsObject(topologyIdObj)) {
+					parse_topology_Id(topologyIdObj, &actServPath->topology_id);
+				}
+				// Device Id
+				cJSON* deviceIdObj = cJSON_GetObjectItem(item2, "device_id");
+				if (cJSON_IsString(deviceIdObj)) {
+					duplicate_string(actServPath->deviceId, deviceIdObj->valuestring);
+				}
+				// EndPointId
+				cJSON* endPointUUIDObj = cJSON_GetObjectItem(item2, "endpoint_uuid");
+				if (cJSON_IsString(endPointUUIDObj)) {
+					duplicate_string(actServPath->endPointId, endPointUUIDObj->valuestring);
+				}
+				// Append the element (i.e., topologyId, deviceId and endpointId) to the Active Service Path
+				actServ->activeServPath = g_list_append(actServ->activeServPath, actServPath);
+			}
+		}
+		// append into the Active Service List
+		activeServList = g_list_append(activeServList, actServ);
+	}
+	return;
+}
 
 ///////////////////////////////////////////////////////////////////////////////////////
 /**
@@ -1176,22 +1341,6 @@ void generate_reverse_linkList() {
 /////////////////////////////////////////////////////////////////////////////////////////
 void parsing_json_obj_pathComp_request(cJSON * root, GIOChannel * source)
 {
-	//DEBUG_PC("**");
-	if (deviceList == NULL){	
-	  	DEBUG_PC ("Device List does not exist ... STOP");
-	  	exit(-1);
-	}
-
-	if (linkList == NULL) {
-		DEBUG_PC("Link List does not exist ... STOP")
-	}
-	
-	if (serviceList == NULL)
-	{
-		DEBUG_PC ("Service List does not exist ... STOP");
-		exit(-1);       
-	} 
-
 	// Set of services to seek their path and resource selection
 	cJSON* serviceListArray = cJSON_GetObjectItem(root, "serviceList");
 	if (cJSON_IsArray(serviceListArray)) {
@@ -1211,7 +1360,14 @@ void parsing_json_obj_pathComp_request(cJSON * root, GIOChannel * source)
 
 		// In the context information, if solely the list of links are passed for a single direction, 
 		// the reverse direction MUST be created synthetically 
-		generate_reverse_linkList();
+		// LGR: deactivated; link duplication needs to be handled smartly with TAPI. Done manually in the topology for now.
+		//generate_reverse_linkList();
+	}
+
+	// Get the list of active services
+	cJSON* actServiceArray = cJSON_GetObjectItem(root, "activeServList");
+	if (cJSON_IsArray(actServiceArray)) {
+		parsing_json_activeService_array(actServiceArray);
 	}
 	return;
 }
@@ -1293,19 +1449,16 @@ struct pathComp_client * RESTapi_client_create (GIOChannel * channel_client, gin
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void RESTapi_client_close (struct pathComp_client* client)
-{
+void RESTapi_client_close (struct pathComp_client* client) {
 	//DEBUG_PC("Closing the client (Id: %d) %p", client->type, client);
 	//DEBUG_PC("Client ibuf: %p || obuf: %p", client->ibuf, client->obuf);
 	
-	if (client->ibuf != NULL)
-	{
+	if (client->ibuf != NULL) {
 		//DEBUG_PC("Client ibuf: %p", client->ibuf);
 		stream_free(client->ibuf);
 		client->ibuf = NULL;
 	}
-	if (client->obuf != NULL)
-	{
+	if (client->obuf != NULL) {
 		//DEBUG_PC("Client obuf: %p", client->obuf);
 		stream_free(client->obuf);
 		client->obuf = NULL;
@@ -1331,16 +1484,14 @@ void RESTapi_client_close (struct pathComp_client* client)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void RESTapi_close_operations (GIOChannel * source)
-{
+void RESTapi_close_operations (GIOChannel * source)	{
 	gint fd = g_io_channel_unix_get_fd (source);
 	
 	//DEBUG_PC ("Stop all the operations over the fd: %d", fd);	
 	g_io_channel_flush(source, NULL);
 	GError *error = NULL;    
 	g_io_channel_shutdown (source, TRUE, &error);
-	if(error)
-	{
+	if(error) {
 		DEBUG_PC ("An error occurred ...");
 	}
 	g_io_channel_unref (source);
@@ -1360,8 +1511,7 @@ void RESTapi_close_operations (GIOChannel * source)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void RESTapi_stop (struct pathComp_client* client, GIOChannel * source, gint fd)
-{
+void RESTapi_stop (struct pathComp_client* client, GIOChannel * source, gint fd) {
 	
 	DEBUG_PC("Client Socket: %d is Stopped", fd);
 	// remove client
@@ -1385,38 +1535,31 @@ void RESTapi_stop (struct pathComp_client* client, GIOChannel * source, gint fd)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gint RESTapi_get_line (GIOChannel *channel, gchar *buf, gint size)
-{
+gint RESTapi_get_line (GIOChannel *channel, gchar *buf, gint size) {
     gint i = 0;
     //DEBUG_PC ("\n");
     //DEBUG_PC ("----- Read REST API Line(\r\n) ------");
     gint n = 0;
     guchar c = '\0'; // END OF FILE    
     gboolean cr = FALSE;
-    while (i < size - 1)
-    {
+    while (i < size - 1) {
 		n = read_channel (channel, &c, 1);		
-		if (n == -1)
-		{
+		if (n == -1) {
 			//DEBUG_PC ("Close the channel and eliminate the client");
 			return -1;			
 		}	
-		if (n > 0)
-		{
+		if (n > 0) {
 			//DEBUG_PC ("%c", c);
 			buf[i] = c;
 			i++;	
-			if (c == '\r')
-			{
+			if (c == '\r') {
 				cr = TRUE;	      
 			}	  
-			if ((c == '\n') && (cr == TRUE))
-			{	   
+			if ((c == '\n') && (cr == TRUE)) {
 				break;
 			}	        
 		} 
-		else
-		{
+		else {
 			c = '\n';
 			buf[i] = c;
 			i++;
@@ -1445,8 +1588,7 @@ guint RESTapi_get_method (gchar *buf, gint *j)
 	guint RestApiMethod = 0;
 	gchar method[255];
 	gint i = 0;	
-	while (!ISspace(buf[*j]) && (i < sizeof(method) - 1))
-	{
+	while (!ISspace(buf[*j]) && (i < sizeof(method) - 1)) {
 		method[i] = buf[*j];
 		i++; 
 		*j = *j + 1;
@@ -1456,32 +1598,60 @@ guint RESTapi_get_method (gchar *buf, gint *j)
 	
 	// Check that the methods are GET, POST or PUT
 	if (strcasecmp((const char *)method, "GET") && strcasecmp((const char *)method, "POST") && 
-		strcasecmp ((const char *)method, "HTTP/1.1") && strcasecmp ((const char *)method, "PUT"))
-	{
-		DEBUG_PC ("The method: %s is not currently supported ...", method);
+		strcasecmp ((const char *)method, "HTTP/1.1") && strcasecmp ((const char *)method, "PUT")) {
+		DEBUG_PC ("%s is not a supported method ...", method);
 		return RestApiMethod;	
 	}
 	// Method selector
-	if (strncmp ((const char*)method, "GET", 3) == 0)
-	{
+	if (strncmp ((const char*)method, "GET", 3) == 0) {
 		RestApiMethod = REST_API_METHOD_GET;		
 	}
-	else if (strncmp ((const char*)method, "POST", 4) == 0)
-	{
+	else if (strncmp ((const char*)method, "POST", 4) == 0) {
 		RestApiMethod = REST_API_METHOD_POST;
 	}	
-	else if (strncmp ((const char *)method, "HTTP/1.1", 8) == 0)
-	{
+	else if (strncmp ((const char *)method, "HTTP/1.1", 8) == 0) {
 		RestApiMethod = REST_API_METHOD_HTTP;
 	}
-	else if (strncmp ((const char *)method, "PUT", 3) == 0)
-	{
+	else if (strncmp ((const char *)method, "PUT", 3) == 0) {
 		RestApiMethod = REST_API_METHOD_PUT;
-	}
-	
+	}	
 	return RestApiMethod;	
 }
 
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief Function used to check whether it is a supported method, and return the associated numerical id
+ *
+ * 	@param method
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
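+// Usage sketch (hypothetical token values): is_rest_api_method("POST") returns
+// REST_API_METHOD_POST, whereas an unrecognized token such as "Host:" returns 0,
+// which lets the caller skip over non-method header fields.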
+guint is_rest_api_method(char *method) {
+	guint RestApiMethod = 0;
+	if (strcasecmp((const char*)method, "GET") && strcasecmp((const char*)method, "POST") &&
+		strcasecmp((const char*)method, "HTTP/1.1") && strcasecmp((const char*)method, "PUT")) {
+		DEBUG_PC("The method: %s is not currently supported ...", method);
+		return RestApiMethod;
+	}
+	// Method selector
+	if (strncmp((const char*)method, "GET", 3) == 0) {
+		RestApiMethod = REST_API_METHOD_GET;
+	}
+	else if (strncmp((const char*)method, "POST", 4) == 0) {
+		RestApiMethod = REST_API_METHOD_POST;
+	}
+	else if (strncmp((const char*)method, "HTTP/1.1", 8) == 0) {
+		RestApiMethod = REST_API_METHOD_HTTP;
+	}
+	else if (strncmp((const char*)method, "PUT", 3) == 0) {
+		RestApiMethod = REST_API_METHOD_PUT;
+	}
+	return RestApiMethod;
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_RESTapi.c
@@ -1535,8 +1705,7 @@ gint get_url (gchar *buf, gint *j, gchar *url)
 /////////////////////////////////////////////////////////////////////////////////////////
 gint get_version (gchar *buf, gint *j, gchar *version) {
 	// Skip space char
-	while (ISspace(buf[*j]) && (*j < strlen(buf)))
-	{
+	while (ISspace(buf[*j]) && (*j < strlen(buf))) {
 		*j = *j + 1;
 	}	
 	//DEBUG_PC ("buf[%d]: %c", *j, buf[*j]);
@@ -1576,8 +1745,7 @@ gint triggering_routeComp (struct compRouteOutputList_t *compRouteList, gchar *a
 	DEBUG_PC("Requested Algorithm: %s", algId);
 	//////////////////// Algorithm Selector (RAId)//////////////////////////////////////	
 	// KSP algorithm
-	if (strncmp ((const char*)algId, "KSP", 3) == 0)
-	{
+	if (strncmp ((const char*)algId, "KSP", 3) == 0) {
 		DEBUG_PC ("Alg Id: KSP");
 		httpCode = pathComp_ksp_alg(compRouteList);
 	}
@@ -1586,20 +1754,11 @@ gint triggering_routeComp (struct compRouteOutputList_t *compRouteList, gchar *a
 		DEBUG_PC("Alg Id: SP");
 		httpCode = pathComp_sp_alg(compRouteList);
 	}
-#if 0
-	// Infrastructure Abstraction (InA)
-	else if (strncmp ((const char*)raId, "InA", 3) == 0) 
-	{
-		//DEBUG_PC ("RA: InA");
-		httpCode = ra_InA_alg (compRouteList);
-	}
-	// Global Concurrent Optimization (GCO): Resoration / Re-Allocation / Re-Optimization
-	else if (strncmp ((const char*)raId, "GCO", 3) == 0)
-	{
-		//DEBUG_PC ("RA: GCO");
-		httpCode = ra_GCO_alg (compRouteList);	
+	// energy-aware routing
+	else if (strncmp((const char*)algId, "EAR", 3) == 0) {
+		DEBUG_PC("Alg Id: Energy Aware Routing, EAR");
+		httpCode = pathComp_ear_alg(compRouteList);
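+		// (this branch is taken when the requested algId carries "EAR",
+		// mirroring the KSP and SP selectors above)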
 	}
-#endif
 	return httpCode;
 }
 
@@ -1616,8 +1775,7 @@ gint triggering_routeComp (struct compRouteOutputList_t *compRouteList, gchar *a
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
-{  
+gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data) {  
 	/** some checks */
 	g_assert(source != NULL);
 	g_assert(data != NULL);	
@@ -1635,24 +1793,21 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
 	gint fd = g_io_channel_unix_get_fd (source);
 	DEBUG_PC ("fd: %d, cond: %d", fd, cond);
 
-	if (cond != G_IO_IN)
-	{
+	if (cond != G_IO_IN) {
 		DEBUG_PC ("Something happening with the channel and fd ... (cond: %d)", cond);
 		RESTapi_stop(client, source, fd);
 		return FALSE;
 	}	
-	/** Clear input buffer. */
+	// Clear input buffer
 	stream_reset (client->ibuf);
 
 	// get line
 	gint nbytes = RESTapi_get_line (source, buf, sizeof (buf));
-	if (nbytes == -1)
-	{
+	if (nbytes == -1) {
 		DEBUG_PC ("nbytes -1 ... CLOSE CLIENT FD and eliminate CLIENT");						
 		RESTapi_stop(client, source, fd);
 		return FALSE;						
-	}		
-	
+	}	
 	if ((buf[0] == '\n') && (nbytes  == 1))
 	{
 		//DEBUG_PC (" -- buf[0] = newline --");
@@ -1661,95 +1816,90 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
 	}
 	
 	gint i = 0, j = 0;
-	// Get the REST Method
-	guint RestApiMethod = RESTapi_get_method (buf, &j);
-	if (RestApiMethod == 0) 	{
-		DEBUG_PC ("The method is NOT supported ...");
-		RESTapi_unimplemented (source);
-		RESTapi_stop(client, source, fd);
-		return FALSE;
-	}
-
-	// get the REST url
-	gchar url[255];
-	i = get_url (buf, &j, url);	
-	url[i] = '\0';	
 
-	// GET - used for checking status of pathComp ... used url /pathComp/api/v1/health
-	if (RestApiMethod == REST_API_METHOD_GET) {
-		if (strncmp((const char*)url, "/health", 7) != 0) {
-			DEBUG_PC("unknown url [%s] for GET method -- Heatlh function", url);
-			RESTapi_stop(client, source, fd);
-			exit(-1);
+	while (1) {
+		DEBUG_PC("%s", buf);
+		char word[255];		
+		while (!ISspace(buf[j]) && (i < sizeof(word) - 1)) {
+			word[i] = buf[j]; i++; j++;
 		}
-		else {
-			DEBUG_PC("Sending API Response OK to health requests");
-			rapi_response_ok(source, HTTP_RETURN_CODE_OK, NULL);
-			return TRUE;
+		word[i] = '\0';
+		// Check if word is bound to a Method, i.e., POST, GET, HTTP/1.1.
+		guint method = is_rest_api_method(word);
+		if (method == 0) {
+			// ignore other REST header fields, e.g., Host:, User-Agent:, Accept: ...
+			break;
 		}
-	}
-
-	// for method POST, PUT check that the url is "/pathComp"
-	if (RestApiMethod == REST_API_METHOD_POST) {
-		if (strncmp((const char*)url, "/pathComp/api/v1/compRoute", 26) != 0)
-		{
-			DEBUG_PC("Unknown url: %s", url);
-			RESTapi_stop(client, source, fd);
-			exit(-1);
+		// word is bound to a known / supported REST Method
+		else {
+			gchar url[255];
+			i = get_url(buf, &j, url);
+			url[i] = '\0';
+			// GET - used for checking status of pathComp ... used url /pathComp/api/v1/health
+			if (method == REST_API_METHOD_GET) {
+				if (strncmp((const char*)url, "/health", 7) != 0) {
+					DEBUG_PC("unknown url [%s] for GET method -- Health function", url);
+					RESTapi_stop(client, source, fd);
+					exit(-1);
+				}
+				else {
+					DEBUG_PC("Sending API Response OK to health requests");
+					rapi_response_ok(source, HTTP_RETURN_CODE_OK, NULL);
+					return TRUE;
+				}
+			}
+			// for method POST, PUT check that the url is "/pathComp"
+			if (method == REST_API_METHOD_POST) {
+				if (strncmp((const char*)url, "/pathComp/api/v1/compRoute", 26) != 0) {
+					DEBUG_PC("Unknown url: %s", url);
+					RESTapi_stop(client, source, fd);
+					exit(-1);
+				}
+			}
+			// get the version	
+			i = get_version(buf, &j, version);
+			version[i] = '\0';
+			break;
 		}
 	}
-	
-	// get the version	
-	i = get_version (buf, &j, version);
-	version[i] = '\0';		
-
 	// Assume HTTP/1.1, then there is Host Header
 	memset(buf, '\0', sizeof(buf));        
 	nbytes = RESTapi_get_line(source, buf, sizeof (buf));
-	if (nbytes == -1)
-	{
+	if (nbytes == -1) {
 		DEBUG_PC ("nbytes -1 ... then close the fd and eliminate associated client");			
 		RESTapi_stop(client, source, fd);
 		return FALSE;					
-	}
-
-	//DEBUG_PC ("Header: %s", buf);	
+	}	
 	
 	// Headers --- The Header Fields ends up with a void line (i.e., \r\n)
-	while ((nbytes > 0) && (strcmp ("\r\n", (const char *)buf) != 0))
-	{	
+	while ((nbytes > 0) && (strcmp ("\r\n", (const char *)buf) != 0)) {	
 		/* read & discard headers */
 		memset(buf, '\0', sizeof(buf));  
 		nbytes = RESTapi_get_line (source, buf, sizeof (buf));
-		if (nbytes == -1)
-		{
+		if (nbytes == -1) {
 			DEBUG_PC ("nbytes -1 ... then close the fd and eliminate associated client");	
 			RESTapi_stop(client, source, fd);
 			return FALSE;
 		}
 		//DEBUG_PC ("Header: %s", buf);	  
-		if (strncmp ((const char *)buf, "Content-Length:", 15) == 0)
-		{
+		if (strncmp ((const char *)buf, "Content-Length:", 15) == 0) {
 			//DEBUG_PC ("Header Content-Length Found");
 			gchar str[20];
 	  
 			gint i = 15, k = 0;  // "Content-Length:" is 15 characters; we skip them to directly retrieve the length in bytes of the Request Body
 			gchar contentLength[255];
 			memset (contentLength, '\0', sizeof (contentLength));			
-			while (buf[i] != '\r')
-			{
+			while (buf[i] != '\r') {
 				//DEBUG_PC ("%c", buf[i]);
 				str[k] = buf[i];
 				k++, i++;
 			}
 			str[k] = '\0';			
 			j = 0, i = 0;
-			while (ISspace(str[j]) && (j < strlen(str)))
-			{
+			while (ISspace(str[j]) && (j < strlen(str))) {
 				j++;
 			}
-			while (j < strlen(str))
-			{
+			while (j < strlen(str)) {
 				contentLength[i] = str[j];
 				i++; j++;
 			}
@@ -1759,8 +1909,7 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
 		}	  
 	}
 	//DEBUG_PC("Read Entire HTTP Header");
-	if (body_length == 0)
-	{
+	if (body_length == 0) {
 		DEBUG_PC ("--- NO REST API Body length (length = %d) ---", body_length);
 		return TRUE;
 	}       
@@ -1769,23 +1918,23 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
 	/////////////////////////////////////////////////////////////////////////////////////////////////////////////
 	//DEBUG_PC ("REST API Request - Body -");
 	nbytes = read_channel (source, (guchar *)(client->ibuf->data + client->ibuf->putp), body_length);
-	if ((nbytes < 0) && (body_length > 0))
-	{
+	if ((nbytes < 0) && (body_length > 0)) {
 		DEBUG_PC ("nbytes: %d; body_length: %d", nbytes, body_length);
 		exit (-1);
-	}
-	
+	}	
 	client->ibuf->putp += nbytes;
 	client->ibuf->endp += nbytes;		
 	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
 	// Parsing the contents of the Request
 	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
-	// build the device list
-	deviceList = create_device_list();
+	// build the device list	
+	deviceList = NULL;
 	// build the link list
-	linkList = create_link_list();
+	linkList = NULL;
 	// Create the network connectivity service list
-	serviceList = create_service_list();
+	serviceList = NULL;
+	// Create the active service List
+	activeServList = NULL;
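+	// Note: these are now plain GLists; the JSON parsers populate them via
+	// g_list_append() and they are released below with g_list_free_full().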
 	
 	// Process the json contents and store relevant information at Device, Link,
 	// and network connectivity service
@@ -1804,22 +1953,21 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
 	gint httpCode = triggering_routeComp (compRouteOutputList, algId);	
 
 	// Send the response to the REST  API Client
-	if (httpCode != HTTP_RETURN_CODE_OK)
-	{            
+	if (httpCode != HTTP_RETURN_CODE_OK) {            
 		DEBUG_PC ("HTTP CODE: %d -- NO OK", httpCode);
 		rapi_response (source, httpCode);
 	}
-	else
-	{
+	else {
 		DEBUG_PC ("HTTP CODE: %d -- OK", httpCode);
 		rapi_response_ok (source, httpCode, compRouteOutputList);            
 	}
 	
 	// Release the variables		
-	g_free (compRouteOutputList);
-	g_free(linkList);
-	g_free(deviceList);
-	g_free(serviceList);
+	g_free (compRouteOutputList);	
+	g_list_free_full(g_steal_pointer(&linkList), (GDestroyNotify)destroy_link);
+	g_list_free_full(g_steal_pointer(&deviceList), (GDestroyNotify)destroy_device);
+	g_list_free_full(g_steal_pointer(&serviceList), (GDestroyNotify)destroy_requested_service);
+	g_list_free_full(g_steal_pointer(&activeServList), (GDestroyNotify)destroy_active_service);
 	return TRUE;  
 }
 
@@ -1836,23 +1984,20 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gboolean RESTapi_tcp_new_connection(GIOChannel *source, GIOCondition cond, gpointer data)
-{
+gboolean RESTapi_tcp_new_connection(GIOChannel *source, GIOCondition cond, gpointer data) {
 	DEBUG_PC (" ****** New TCP Connection (REST API) ******");
 	/** get size of client_addre structure */
 	struct sockaddr_in client_addr;
 	socklen_t client = sizeof(client_addr);
 	
-	if ((cond == G_IO_HUP) || (cond == G_IO_ERR) || (G_IO_NVAL))
-	{
+	if ((cond == G_IO_HUP) || (cond == G_IO_ERR) || (cond == G_IO_NVAL)) {
 		//DEBUG_PC ("Something happening with the channel and fd ... cond: %d", cond);		
 		// Find the associated client (by the fd) and remove from PATH COMP client list. 
 		// Stop all the operations over that PATH COMP client bound channel
 		struct pathComp_client *pathComp_client = NULL;
 		gint fd = g_io_channel_unix_get_fd (source);
 		GList *found = g_list_find_custom (RESTapi_tcp_client_list, &fd, find_rl_client_by_fd);
-		if (found != NULL)
-		{
+		if (found != NULL) {
 			pathComp_client = (struct pathComp_client*)(found->data);
 			// remove client
 			RESTapi_client_close(pathComp_client);
@@ -1862,28 +2007,22 @@ gboolean RESTapi_tcp_new_connection(GIOChannel *source, GIOCondition cond, gpoin
 			return FALSE;
 		}		
 	}
-	if (cond == G_IO_IN)
-	{
+	if (cond == G_IO_IN) {
 		gint new = accept(g_io_channel_unix_get_fd(source), (struct sockaddr*)&client_addr, &client);
-		if (new < 0)
-		{
+		if (new < 0) {
 			//DEBUG_PC ("Unable to accept new connection");
 			return FALSE;
 		}
 
-		/** new channel */
+		// new channel
 		GIOChannel * new_channel = g_io_channel_unix_new (new);		
 		//DEBUG_PC ("TCP Connection (REST API) is UP; (socket: %d)", new);
-
-		/** create pathComp client */		
+		// create pathComp client		
 		struct pathComp_client *new_client = RESTapi_client_create (new_channel, new);
 		
-		/** 
-		* force binary encoding with NULL
-		*/
+		// force binary encoding with NULL
 		GError *error = NULL;
-		if ( g_io_channel_set_encoding (new_channel, NULL, &error) != G_IO_STATUS_NORMAL)
-		{		
+		if ( g_io_channel_set_encoding (new_channel, NULL, &error) != G_IO_STATUS_NORMAL) {		
 			DEBUG_PC ("Error: %s", error->message);
 			exit (-1);
 		}
@@ -1891,8 +2030,7 @@ gboolean RESTapi_tcp_new_connection(GIOChannel *source, GIOCondition cond, gpoin
 		// On unbuffered channels, it is safe to mix read
 		// & write calls from the new and old APIs.
 		g_io_channel_set_buffered (new_channel, FALSE);
-		if (g_io_channel_set_flags (new_channel, G_IO_FLAG_NONBLOCK, &error) != G_IO_STATUS_NORMAL )
-		{
+		if (g_io_channel_set_flags (new_channel, G_IO_FLAG_NONBLOCK, &error) != G_IO_STATUS_NORMAL ) {
 			DEBUG_PC ("Error: %s", error->message);
 			exit (-1);
 		}
diff --git a/src/pathcomp/backend/pathComp_RESTapi.h b/src/pathcomp/backend/pathComp_RESTapi.h
index 3b662955959fd8ddad27e337338440b6834f9741..997adce3ead70a314c5d49a6ebeda74ea65ee6a2 100644
--- a/src/pathcomp/backend/pathComp_RESTapi.h
+++ b/src/pathcomp/backend/pathComp_RESTapi.h
@@ -48,8 +48,7 @@
 // List of tcp clients connected to PATH COMP
 
 #define PATH_COMP_CLIENT_TYPE	1000
-struct pathComp_client
-{
+struct pathComp_client {
 	/** IO Channel from client. */
 	GIOChannel *channel;
 
diff --git a/src/pathcomp/backend/pathComp_ear.c b/src/pathcomp/backend/pathComp_ear.c
new file mode 100644
index 0000000000000000000000000000000000000000..aee3d09f768619f3f6eb40231133fedd30dbb769
--- /dev/null
+++ b/src/pathcomp/backend/pathComp_ear.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h> 
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <string.h>
+#include <unistd.h>
+#include <netdb.h>
+#include <glib.h>
+#include <sys/time.h>
+#include <ctype.h>
+#include <strings.h>
+#include <time.h>
+#include <math.h>
+#include <fcntl.h>
+
+#include "pathComp_log.h"
+#include "pathComp_tools.h"
+#include "pathComp_ear.h"
+
+// Global Variables
+GList* contextSet;
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_ear.c
+ * 	@brief Iterates over the list of network connectivity service requests
+ * to compute their own paths fulfilling the constraints and minimizing the 
+ * total consume energy (power)
+ *
+ *  @param outputList
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+void ear_comp_services(struct compRouteOutputList_t* oPathList, gint activeFlag) {
+	g_assert(oPathList);
+	// Check that there is at least one service to be processed
+	if (g_list_length(serviceList) == 0) {
+		DEBUG_PC("serviceList is Empty...");
+		return;
+	}	
+	gint i = 0;
+	DEBUG_PC("[EAR]----- Starting the Energy Aware Routing Computation ------");
+	DEBUG_PC("[EAR]----- Over Context %s Devices and Links", activeFlag ? "Active" : "All");
+	for (GList* listnode = g_list_first(serviceList);
+		listnode;
+		listnode = g_list_next(listnode), i++) {
+		struct service_t* service = (struct service_t*)(listnode->data);
+
+		DEBUG_PC("[EAR] Triggering Computation ServiceId: %s [ContextId: %s]", service->serviceId.service_uuid, service->serviceId.contextId);
+		struct compRouteOutput_t* pathService = &(oPathList->compRouteConnection[i]);
+		DEBUG_PC("Number of pathService[%d]->paths: %d", i, pathService->numPaths);
+		// check endpoints of the service are different (PE devices/nodes are different)
+		if (same_src_dst_pe_nodeid(service) == 0) {
+			DEBUG_PC("[EAR] PEs are the same... no path computation");
+			comp_route_connection_issue_handler(pathService, service);
+			oPathList->numCompRouteConnList++;
+			continue;
+		}
+		struct graph_t* g = get_graph_by_contextId(contextSet, service->serviceId.contextId);
+		if (g == NULL) {
+			DEBUG_PC("[EAR] contextId: %s NOT in the ContextSet ... then NO graph", service->serviceId.contextId);
+			comp_route_connection_issue_handler(pathService, service);
+			oPathList->numCompRouteConnList++;
+			continue;
+		}
+		alg_comp(service, pathService, g, ENERGY_EFFICIENT_ARGUMENT);
+		oPathList->numCompRouteConnList++;
+
+	// for each network connectivity service, a single computed path (out of the KSP) is returned
+		// If path is found, then the selected resources must be pre-assigned into the context information
+		if (pathService->noPathIssue == NO_PATH_CONS_ISSUE) {
+			continue;
+		}
+		struct path_t* path = &(pathService->paths[pathService->numPaths - 1]);
+		allocate_graph_resources(path, service, g);
+		allocate_graph_reverse_resources(path, service, g);
+		print_graph(g);
+	}
+	return;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_ear.c
+ * 	@brief Tries to route all the services over the active devices and links. If not all 
+ * these services can be routed, then it is tried to route them through the whole context 
+ * including both active and slept/power off devices and links
+ *
+ *  @param oList
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ ////////////////////////////////////////////////////////////////////////////////////////
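+// Two-phase sketch of the strategy implemented below: (1) compute all requests
+// over a context restricted to active devices/links; if every request obtains a
+// path, keep that result; (2) otherwise rebuild the full context (active plus
+// powered-off elements) and recompute over it.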
+void ear_comp(struct compRouteOutputList_t* oList) {
+	g_assert(oList);
+
+	DEBUG_PC("Number of services to be processed: %d", g_list_length(serviceList));
+	// Make a working copy of oList to hold the paths computed over the active devices and links
+	struct compRouteOutputList_t* oListTmp = create_route_list();
+	duplicate_route_list(oListTmp, oList);
+	print_path_connection_list(oListTmp);
+	
+	// 1st - try to accommodate all the requested services over the active devices and links
+	gint activeContext = 1;
+	// Create the context for the active devices and links
+	DEBUG_PC("=========================== Building the Active ContextSet =================================");
+	contextSet = NULL;
+	build_contextSet_active(&contextSet);
+	//print_contextSet(contextSet);
+	ear_comp_services(oListTmp, activeContext);
+	
+	gint numSuccessPaths = 0;
+	// Check the number of successfully computed paths, i.e., those without path issues
+	for (gint i = 0; i < oListTmp->numCompRouteConnList; i++) {
+		struct compRouteOutput_t* ro = &(oListTmp->compRouteConnection[i]);
+		DEBUG_PC("Number of paths: %d for oListTmp[%d]", ro->numPaths, i);
+		if (ro->noPathIssue == 0) {
+			numSuccessPaths++;
+		}
+	}
+	if (numSuccessPaths == oListTmp->numCompRouteConnList) {
+		duplicate_route_list(oList, oListTmp);
+		g_free(oListTmp);
+		return;
+	}	
+	// 2nd - If not all the services have been accommodated, use the whole set of devices and links
+	// Create the context for all the devices and links
+
+	// Remove the previous context built from the active devices and links
+	g_list_free_full(g_steal_pointer(&contextSet), (GDestroyNotify)destroy_context);
+	contextSet = NULL;
+	DEBUG_PC("====================== Building the whole ContextSet =====================================");
+	build_contextSet(&contextSet);
+	//print_contextSet(contextSet);
+
+	activeContext = 0; // Active flag is not SET
+	ear_comp_services(oList, activeContext);	
+	return;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_ear.c
+ * 	@brief handles the path computation for energy aware routing
+ *
+ *  @param compRouteOutput
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+gint pathComp_ear_alg(struct compRouteOutputList_t* routeConnList) {
+	g_assert(routeConnList);
+	print_path_connection_list(routeConnList);
+
+	gint numSuccessPathComp = 0, numPathCompIntents = 0;
+
+	DEBUG_PC("================================================================");
+	DEBUG_PC("===========================   EAR   =========================");
+	DEBUG_PC("================================================================");
+	// increase the number of Path Comp. Intents
+	numPathCompIntents++;
+	gint http_code = HTTP_CODE_OK;
+
+	// timestamp t0
+	struct timeval t0;
+	gettimeofday(&t0, NULL);
+
+	// Initialize and create the contextSet
+	//contextSet = NULL;	
+	//build_contextSet(contextSet);
+	//print_contextSet(contextSet);
+#if 1	
+	//Triggering the path computation for each specific network connectivity service
+	ear_comp(routeConnList);
+
+	// -- timestamp t1
+	struct timeval t1, delta;
+	gettimeofday(&t1, NULL);
+	delta.tv_sec = t1.tv_sec - t0.tv_sec;
+	delta.tv_usec = t1.tv_usec - t0.tv_usec;
+	delta = tv_adjust(delta);
+
+	numSuccessPathComp++;
+	update_stats_path_comp(routeConnList, delta, numSuccessPathComp, numPathCompIntents);
+	print_path_connection_list(routeConnList);
+#endif
+	g_list_free_full(g_steal_pointer(&contextSet), (GDestroyNotify)destroy_context);
+	return http_code;
+}
\ No newline at end of file
diff --git a/src/pathcomp/backend/pathComp_ear.h b/src/pathcomp/backend/pathComp_ear.h
new file mode 100644
index 0000000000000000000000000000000000000000..dff6202568572bfa3343c21c29ad663e167ccfaa
--- /dev/null
+++ b/src/pathcomp/backend/pathComp_ear.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _PATHCOMP_EAR_H
+#define _PATHCOMP_EAR_H
+
+#include <glib.h>
+#include <glib/gstdio.h>
+#include <glib-2.0/glib/gtypes.h>
+
+ // Prototypes of externally declared functions
+gint pathComp_ear_alg(struct compRouteOutputList_t*);
+
+#endif
\ No newline at end of file
diff --git a/src/pathcomp/backend/pathComp_ksp.c b/src/pathcomp/backend/pathComp_ksp.c
index 4ea413d5eabbccbe1f86a3bc94edca822ffc4e8d..00ebaf5b8b7e0a888720a4092a0d23d75a3eb04b 100644
--- a/src/pathcomp/backend/pathComp_ksp.c
+++ b/src/pathcomp/backend/pathComp_ksp.c
@@ -36,401 +36,7 @@
 #include "pathComp_ksp.h"
 
 // Global Variables
-struct map_nodes_t *mapNodes;
-struct graph_t *graph;
-struct contextSet_t* contextSet;
-
-///////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_ksp.c
- * 	@brief Dijkstra algorithm
- *
- *  @param srcMapIndex
- *  @param dstMapIndex
- *	@param g
- *	@param s
- *	@param SN
- *	@param RP
- *
- *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-void sp_comp(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct service_t* s,
-	struct nodes_t* SN, struct compRouteOutputItem_t* RP) {
-	g_assert(s);
-	g_assert(g);
-
-	// Set params into mapNodes related to the source nodes of the request
-	mapNodes->map[srcMapIndex].distance = 0.0;
-	mapNodes->map[srcMapIndex].latency = 0.0;
-	mapNodes->map[srcMapIndex].avaiBandwidth = 0.0;
-
-	// Initialize the set Q and S
-	GList* S = NULL, * Q = NULL;
-	gint indexVertice = -1;
-
-	//  Add the source into the Q
-	struct nodeItem_t* nodeItem = g_malloc0(sizeof(struct nodeItem_t));
-	if (nodeItem == NULL) {
-		DEBUG_PC("memory allocation failed\n");
-		exit(-1);
-	}
-	// initialize some nodeItem attributes
-	nodeItem->distance = 0.0;
-	nodeItem->latency = 0.0;
-	duplicate_node_id(&mapNodes->map[srcMapIndex].verticeId, &nodeItem->node);
-	Q = g_list_insert_sorted(Q, nodeItem, sort_by_distance);
-
-	// Check whether there is spurNode (SN) and rootPath (RP)
-	if (SN != NULL && RP != NULL) {
-		struct routeElement_t* re;
-		for (gint j = 0; j < RP->numRouteElements; j++)
-		{
-			// Get the source and target Nodes of the routeElement within the rootPath
-			re = &RP->routeElement[j];
-			DEBUG_PC ("root Link: aNodeId: %s (%s) --> zNodeiId: %s (%s)", re->aNodeId.nodeId, re->aEndPointId, re->zNodeId.nodeId, re->zEndPointId);
-
-			// if ingress of the root link (aNodeId) is the spurNode, then stops
-			if (compare_node_id(&re->aNodeId, SN) == 0)
-			{
-				DEBUG_PC ("root Link: aNodeId: %s and spurNode: %s -- stop exploring the rootPath (RP)", re->aNodeId.nodeId, SN->nodeId);
-				break;
-			}
-			// Extract from Q
-			GList* listnode = g_list_first(Q);
-			struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
-			Q = g_list_remove(Q, node);
-
-			//DEBUG_RL_RA ("Exploring node %s", node->node.nodeId);
-			indexVertice = graph_vertice_lookup(node->node.nodeId, g);
-			g_assert(indexVertice >= 0);
-
-			// Get the indexTargetedVertice
-			gint indexTVertice = -1;
-			indexTVertice = graph_targeted_vertice_lookup(indexVertice, re->zNodeId.nodeId, g);
-			gint done = check_link(node, indexVertice, indexTVertice, g, s, &S, &Q, mapNodes);
-			(void)done;
-
-			// Add to the S list
-			S = g_list_append(S, node);    
-		}
-
-		// Check that the first node in Q set is SpurNode, otherwise something went wrong ...
-		if (compare_node_id(&re->aNodeId, SN) != 0) {
-			//DEBUG_PC ("root Link: aNodeId: %s is NOT the spurNode: %s -- something wrong", re->aNodeId.nodeId, SN->nodeId);
-			g_list_free_full(S, g_free);
-			g_list_free_full(Q, g_free);
-			return;
-		}
-	}		
-	while (g_list_length(Q) > 0) {
-		//Extract from Q set
-		GList* listnode = g_list_first(Q);
-		struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
-		Q = g_list_remove(Q, node);
-		DEBUG_PC ("Q length: %d", g_list_length (Q)); 
-		DEBUG_PC ("DeviceId: %s", node->node.nodeId);            
-
-		// visit all the links from u within the graph
-		indexVertice = graph_vertice_lookup(node->node.nodeId, g);
-		g_assert(indexVertice >= 0);
-
-		// Check the targeted vertices from u
-		for (gint i = 0; i < g->vertices[indexVertice].numTargetedVertices; i++)  {                
-			gint done = check_link(node, indexVertice, i, g, s, &S, &Q, mapNodes);
-			(void)done;
-		}
-		// Add node into the S Set
-		S = g_list_append(S, node);
-		//DEBUG_PC ("S length: %d", g_list_length (S));              
-	}
-	g_list_free_full(S, g_free);
-	g_list_free_full(Q, g_free);
-	return;
-}
-
-///////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_ksp.c
- * 	@brief KSP computation using Dijkstra algorithm
- *
- *  @param pred
- *  @param g
- *	@param s
-  *	@param SN
- *	@param RP
- *
- *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-gint ksp_comp(struct pred_t* pred, struct graph_t* g, struct service_t* s, 
-				struct nodes_t *SN, struct compRouteOutputItem_t *RP) {
-	g_assert(pred);
-	g_assert(g);
-	g_assert(s);
-
-	// Check the both ingress src and dst endpoints are in the graph
-	gint srcMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[0].device_uuid, mapNodes);
-	if (srcMapIndex == -1) {
-		DEBUG_PC("ingress DeviceId: %s NOT in the graph", s->service_endpoints_id[0].device_uuid);
-		return -1;
-	}
-
-	gint dstMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
-	if (dstMapIndex == -1) {
-		DEBUG_PC("egress DeviceId: %s NOT in the graph", s->service_endpoints_id[1].device_uuid);
-		return -1;
-	}
-
-	// Compute the shortest path route
-	sp_comp(srcMapIndex, dstMapIndex, g, s, SN, RP);
-		
-	// Check that a feasible solution in term of latency and bandwidth is found
-	gint map_dstIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
-	struct map_t* dest_map = &mapNodes->map[map_dstIndex];
-	if (!(dest_map->distance < INFINITY_COST)) 	{
-		DEBUG_PC("destination: %s NOT reachable", s->service_endpoints_id[1].device_uuid);
-		return -1;
-	}
-
-	DEBUG_PC("AvailBw @ %s is %f", dest_map->verticeId.nodeId, dest_map->avaiBandwidth);
-	// Check that the computed available bandwidth is larger than 0.0
-	if (dest_map->avaiBandwidth <= (gfloat)0.0) {
-		DEBUG_PC("dst: %s NOT REACHABLE", s->service_endpoints_id[1].device_uuid);
-		return -1;
-	}
-	DEBUG_PC("dst: %s REACHABLE", s->service_endpoints_id[1].device_uuid);
-	// Handle predecessors
-	build_predecessors(pred, s, mapNodes);
-	return 1;
-}
-
-////////////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_ksp.c
- * 	@brief K-CSPF algorithm execution (YEN algorithm)
- *
- *  @param s
- *  @param path
- *  @param g
- *
- *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_t *g) {
-	g_assert(s);
-	g_assert(path);
-	g_assert(g);
-
-	// create map of devices/nodes to handle the path computation using the context
-	mapNodes = create_map_node();
-	build_map_node(mapNodes, g);
-
-	// predecessors to store the computed path    
-	struct pred_t* predecessors = create_predecessors();
-
-	struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[0]);
-	struct service_endpoints_id_t* eEp = &(s->service_endpoints_id[1]);
-
-	// Compute the 1st KSP path
-	gint done = ksp_comp (predecessors, g, s, NULL, NULL);
-	if (done == -1) {
-		DEBUG_PC("NO PATH FOUND %s[%s] ---> %s[%s]", iEp->device_uuid, iEp->endpoint_uuid, eEp->device_uuid, eEp->endpoint_uuid);
-		comp_route_connection_issue_handler(path, s);
-		g_free(mapNodes); g_free(predecessors);
-		return;
-	}
-
-	// Construct the path from the computed predecessors
-	struct compRouteOutputItem_t* p = create_path_item();
-	//print_predecessors(predecessors);
-	build_path(p, predecessors, s);
-	//DEBUG_PC ("Path is constructed");
-
-	gint indexDest = get_map_index_by_nodeId(eEp->device_uuid, mapNodes);
-	struct map_t* dst_map = &mapNodes->map[indexDest];
-	// Get the delay and cost
-	memcpy(&p->cost, &dst_map->distance, sizeof(gdouble));
-	memcpy(&p->availCap, &dst_map->avaiBandwidth, sizeof(dst_map->avaiBandwidth));
-	memcpy(&p->delay, &dst_map->latency, sizeof(mapNodes->map[indexDest].latency));
-	DEBUG_PC ("Computed Path Avail Bw: %f, Path Cost: %f, latency: %f", p->availCap, p->cost, p->delay);
-	print_path(p);
-
-	// If 1st SP satisfies the requirements from the req, STOP
-	gboolean feasibleRoute = check_computed_path_feasability(s, p);
-	if (feasibleRoute == TRUE) 	{
-		DEBUG_PC("1st K-CSPF FEASIBLE, STOP!");
-		print_path (p);		
-		path->numPaths++;
-
-		// Copy the serviceId
-		DEBUG_PC("contextId: %s", s->serviceId.contextId);
-		copy_service_id(&path->serviceId, &s->serviceId);
-
-		// copy the service endpoints, in general, there will be 2 (point-to-point network connectivity services)
-		for (gint i = 0; i < s->num_service_endpoints_id; i++) {
-			struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[i]);
-			struct service_endpoints_id_t* oEp = &(path->service_endpoints_id[i]);
-			copy_service_endpoint_id(oEp, iEp);
-		}
-		path->num_service_endpoints_id = s->num_service_endpoints_id;
-
-		// Copy the computed path
-		struct path_t* targetedPath = &(path->paths[path->numPaths - 1]);
-		duplicate_path_t(p, targetedPath);		
-		print_path_t (targetedPath);
-		g_free(predecessors);
-		g_free(p);
-		g_free(mapNodes);
-		return;
-	}
-
-	DEBUG_PC("1st CSPF COMPUTATION IS NOT FEASIBLE --> TRIGGER K COMPUTATIONS");
-	// Create A and B sets of paths to handle the YEN algorithm
-	struct path_set_t* A = create_path_set();
-	struct path_set_t* B = create_path_set();
-
-	// Add the previously computed path into A->paths[0]	
-	duplicate_path(p, &A->paths[0]);
-
-	A->numPaths++;
-	g_free(predecessors);
-	g_free(p);
-	for (gint k = 1; k < MAX_KSP_VALUE; k++) {
-		DEBUG_PC("------------ kth (%d) ---------------------", k);
-		struct compRouteOutputItem_t* p = create_path_item();
-		duplicate_path(&A->paths[k - 1], p);
-		// The spurNode ranges from near-end node of the first link to the near-end of the last link forming the kth path
-		gint i = 0;
-		struct compRouteOutputItem_t* rootPath = create_path_item();
-		for (i = 0; i < p->numRouteElements; i++) {
-			struct nodes_t* spurNode = create_node();
-			struct nodes_t* nextSpurNode = create_node();
-			struct routeElement_t* re = &(p->routeElement[i]);
-			// Create predecessors to store the computed path
-			struct pred_t* predecessors = create_predecessors();
-			// Clear previous mapNodes, i.e. create it again
-			g_free(mapNodes);
-			mapNodes = create_map_node();
-			build_map_node(mapNodes, g);
-			struct nodes_t* n = &re->aNodeId;
-			duplicate_node_id(n, spurNode);
-			n = &re->zNodeId;
-			duplicate_node_id(n, nextSpurNode);
-			DEBUG_PC("spurNode: %s --> nextSpurNode: %s", spurNode->nodeId, nextSpurNode->nodeId);
-
-			// rootPath contains a set of links of A[k-1] from the source Node till the SpurNode -> NextSpurNode
-			// Example: A[k-1] = {L1, L2, L3, L4}, i.e. " Node_a -- L1 --> Node_b -- L2 --> Node_c -- L3 --> Node_d -- L4 --> Node_e "
-			// E.g., for the ith iteration if the spurNode = Node_c and NextSpurNode = Node_d; then rootPath = {L1, L2, L3}			
-			add_routeElement_path_back(re, rootPath);
-			DEBUG_PC("rootPath:");
-			print_path(rootPath);
-
-			// For all existing and computed paths p in A check if from the source to the NextSpurNode
-			// the set of links matches with those contained in the rootPath
-			// If YES, remove from the auxiliary graph the next link in p from NextSpurNode
-			// Otherwise do nothing 
-			struct graph_t* gAux = create_graph();
-			// Baseline graph 
-			//build_graph (gAux);
-			duplicate_graph(g, gAux);
-			// Modified graph
-			modify_targeted_graph(gAux, A, rootPath, spurNode);
-
-			// Trigger the computation of the path from src to dst constrained to traverse all the links from src 
-			// to spurNode contained into rootPath over the resulting graph			
-			if (ksp_comp(predecessors, gAux, s, spurNode, rootPath) == -1) {
-				DEBUG_PC("FAILED SP from %s via spurNode: %s to %s", iEp->device_uuid, spurNode->nodeId, eEp->device_uuid);
-				g_free(nextSpurNode);
-				g_free(spurNode);
-				g_free(gAux);
-				g_free(predecessors);
-				continue;
-			}
-			DEBUG_PC("SUCCESFUL SP from %s via spurNode: %s to %s", iEp->device_uuid, spurNode->nodeId, eEp->device_uuid);
-			// Create the node list from the predecessors
-			struct compRouteOutputItem_t* newKpath = create_path_item();
-			build_path(newKpath, predecessors, s);
-			DEBUG_PC("new K (for k: %d) Path is built", k);
-			gint indexDest = get_map_index_by_nodeId(eEp->device_uuid, mapNodes);
-			struct map_t* dst_map = &mapNodes->map[indexDest];
-
-			memcpy(&newKpath->cost, &dst_map->distance, sizeof(gdouble));
-			memcpy(&newKpath->availCap, &dst_map->avaiBandwidth, sizeof(dst_map->avaiBandwidth));
-			memcpy(&newKpath->delay, &dst_map->latency, sizeof(mapNodes->map[indexDest].latency));			
-			DEBUG_PC("New PATH (@ kth: %d) ADDED to B[%d] - {Path Cost: %f, e2e latency: %f, bw: %f ", k, B->numPaths, newKpath->cost, newKpath->delay, newKpath->availCap);
-			// Add the computed kth SP to the heap B
-			duplicate_path(newKpath, &B->paths[B->numPaths]);
-			B->numPaths++;
-			DEBUG_PC("Number of B paths: %d", B->numPaths);
-
-			g_free(newKpath);
-			g_free(nextSpurNode);
-			g_free(spurNode);
-			g_free(gAux);
-			g_free(predecessors);
-		}
-
-		// If B is empty then stops
-		if (B->numPaths == 0) {
-			DEBUG_PC("B does not have any path ... the stops kth computation");
-			break;
-		}
-
-		// Sort the potential paths contained in B by cost and latency and available bandwidth
-		sort_path_set(B);
-
-		// Add the lowest path into A[k]		
-		DEBUG_PC("-------------------------------------------------------------");
-		DEBUG_PC("To Add SP from B[0] to A[%d] --- Path Cost: %f, e2e Latency: %f", A->numPaths, B->paths[0].cost, B->paths[0].delay);
-		duplicate_path(&B->paths[0], &A->paths[A->numPaths]);
-		A->numPaths++;
-		DEBUG_PC("A Set size: %d", A->numPaths);
-		DEBUG_PC("-------------------------------------------------------------");
-
-		// Remove/pòp front element from the path set B (i.e. remove B[0])
-		pop_front_path_set(B);
-		DEBUG_PC("B Set Size: %d", B->numPaths);
-	}
-
-	// Copy the serviceId
-	copy_service_id(&path->serviceId, &s->serviceId);
-	// copy the service endpoints, in general, there will be 2 (point-to-point network connectivity services)
-	for (gint m = 0; m < s->num_service_endpoints_id; m++) {
-		struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[m]);
-		struct service_endpoints_id_t* oEp = &(path->service_endpoints_id[m]);
-		copy_service_endpoint_id(oEp, iEp);
-	}
-
-	for (gint ksp = 1; ksp < A->numPaths; ksp++){
-		if (ksp >= MAX_KSP_VALUE) {
-			DEBUG_PC("Number Requested paths (%d) REACHED - STOP", ksp);
-			break;
-		}
-		gdouble feasibleRoute = check_computed_path_feasability(s, &A->paths[ksp]);
-		if (feasibleRoute == TRUE) {
-			DEBUG_PC("A[k-th%d] available: %f, pathCost: %f; latency: %f", ksp, A->paths[ksp].availCap, A->paths[ksp].cost, A->paths[ksp].delay);
-			struct compRouteOutputItem_t* pathaux = &A->paths[ksp];
-			path->numPaths++;
-			struct path_t* targetedPath = &path->paths[path->numPaths - 1];
-			duplicate_path_t(pathaux, targetedPath);		
-			print_path_t(targetedPath);
-			remove_path_set(A);
-			remove_path_set(B);
-			return;
-		}
-	}
-	remove_path_set(A);
-	remove_path_set(B);
-	// No paths found --> Issue	
-	DEBUG_PC("K-SP failed!!!");
-	comp_route_connection_issue_handler(path, s);
-
-	return;
-}
+GList* contextSet;
 
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
@@ -446,14 +52,20 @@ void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_
  /////////////////////////////////////////////////////////////////////////////////////////
 void ksp_alg_execution_services(struct compRouteOutputList_t* outputList) {
 	g_assert(outputList);
-	g_assert(contextSet);
-	g_assert(serviceList);
-
+	// Check that there is at least one service to be processed
+	if (g_list_length(serviceList) == 0) {
+		DEBUG_PC("serviceList is Empty...");
+		return;
+	}
 	DEBUG_PC("----- Starting the KSP Computation ------");
 
 	// Iterate over the list of requested network connectivity services
-	for (gint i = 0; i < serviceList->numServiceList; i++) {
-		struct service_t* service = &(serviceList->services[i]);
+	gint i = 0;
+	for (GList* listnode = g_list_first(serviceList);
+		listnode;
+		listnode = g_list_next(listnode), i++){
+		//struct service_t* service = &(serviceList->services[i]);
+		struct service_t* service = (struct service_t*)(listnode->data);
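+		// serviceList is now a GList of service_t*; i keeps outputList->compRouteConnection[] aligned with the traversal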
 
 		DEBUG_PC("Starting the Computation for ServiceId: %s [ContextId: %s]", service->serviceId.service_uuid, service->serviceId.contextId);
 		struct compRouteOutput_t* pathService = &(outputList->compRouteConnection[i]);
@@ -472,13 +84,12 @@ void ksp_alg_execution_services(struct compRouteOutputList_t* outputList) {
 			outputList->numCompRouteConnList++;
 			continue;
 		}
-		alg_comp(service, pathService, g);
+		alg_comp(service, pathService, g, NO_OPTIMIZATION_ARGUMENT); // last argument (0) selects the default, non-optimized computation
 		outputList->numCompRouteConnList++;
 
-		// for each network connectivity service, a single computed path (out of the KCSP) is retuned
+		// for each network connectivity service, a single computed path (out of the KSP) is returned
 		// If path is found, then the selected resources must be pre-assigned into the context information
-		if (pathService->noPathIssue == NO_PATH_CONS_ISSUE)
-		{
+		if (pathService->noPathIssue == NO_PATH_CONS_ISSUE) {
 			continue;
 		}
 		struct path_t* path = &(pathService->paths[pathService->numPaths - 1]);
@@ -517,9 +128,9 @@ gint pathComp_ksp_alg(struct compRouteOutputList_t * routeConnList)
 	gettimeofday(&t0, NULL);	
 	
 	// Allocate memory for the context
-	contextSet = create_contextSet();
+	contextSet = NULL;
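+	// an empty GList is simply NULL; build_contextSet() takes &contextSet so it can update the list head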
 	// Build up the contextSet (>= 1)
-	build_contextSet(contextSet);
+	build_contextSet(&contextSet);
 	print_contextSet(contextSet);	
 #if 1	
 	//Triggering the path computation for each specific network connectivity service
@@ -537,6 +148,6 @@ gint pathComp_ksp_alg(struct compRouteOutputList_t * routeConnList)
 	print_path_connection_list(routeConnList);
 #endif
 
-	g_free(contextSet);
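+	// g_steal_pointer() clears the global before g_list_free_full() destroys each context via destroy_context()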
+	g_list_free_full(g_steal_pointer(&contextSet), (GDestroyNotify)destroy_context);
 	return http_code;
 }
\ No newline at end of file
diff --git a/src/pathcomp/backend/pathComp_sp.c b/src/pathcomp/backend/pathComp_sp.c
index 447b0d2a6d002d12808f80c855c74f8d0b489743..48231b591e66ae0f8161ff14f79e7c9a6d832328 100644
--- a/src/pathcomp/backend/pathComp_sp.c
+++ b/src/pathcomp/backend/pathComp_sp.c
@@ -36,74 +36,7 @@
 #include "pathComp_sp.h"
 
 // Global Variables
-struct map_nodes_t* mapNodes;
-struct graph_t* graph;
-struct contextSet_t* contextSet;
-
-///////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_sp.c
- * 	@brief Excecution Dijkstra algorithm
- *
- *  @param srcMapIndex
- *  @param dstMapIndex
- *	@param g
- *	@param s
- *
- *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct service_t* s) {
-	g_assert(s);
-	g_assert(g);
-
-	// Set params into mapNodes related to the source nodes of the request
-	mapNodes->map[srcMapIndex].distance = 0.0;
-	mapNodes->map[srcMapIndex].latency = 0.0;
-	mapNodes->map[srcMapIndex].avaiBandwidth = 0.0;
-
-	// Initialize the set Q and S
-	GList* S = NULL, *Q = NULL;
-	gint indexVertice = -1;
-
-	//  Add the source into the Q
-	struct nodeItem_t* nodeItem = g_malloc0(sizeof(struct nodeItem_t));
-	if (nodeItem == NULL) {
-		DEBUG_PC("memory allocation failed\n");
-		exit(-1);
-	}
-	// initialize some nodeItem attributes
-	nodeItem->distance = 0.0;
-	nodeItem->latency = 0.0;
-	duplicate_node_id(&mapNodes->map[srcMapIndex].verticeId, &nodeItem->node);
-	Q = g_list_insert_sorted(Q, nodeItem, sort_by_distance);
-
-	while (g_list_length(Q) > 0) {
-		//Extract from Q set
-		GList* listnode = g_list_first(Q);
-		struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
-		Q = g_list_remove(Q, node);
-		DEBUG_PC("Q length: %d", g_list_length(Q));
-		DEBUG_PC("DeviceId: %s", node->node.nodeId);
-
-		// visit all the links from u within the graph
-		indexVertice = graph_vertice_lookup(node->node.nodeId, g);
-		g_assert(indexVertice >= 0);
-
-		// Check the targeted vertices from u
-		for (gint i = 0; i < g->vertices[indexVertice].numTargetedVertices; i++) {
-			gint done = check_link(node, indexVertice, i, g, s, &S, &Q, mapNodes);
-			(void)done;
-		}
-		// Add node into the S Set
-		S = g_list_append(S, node);
-		//DEBUG_PC ("S length: %d", g_list_length (S));              
-	}
-	g_list_free_full(S, g_free);
-	g_list_free_full(Q, g_free);
-	return;
-}
+GList* contextSet;
 
 ///////////////////////////////////////////////////////////////////////////////////
 /**
@@ -113,12 +46,13 @@ void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct serv
  *  @param pred
  *  @param g
  *	@param s
+ *  @param mapNodes
  *
 *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-gint computation(struct pred_t* pred, struct graph_t* g, struct service_t* s) {
+gint computation(struct pred_t* pred, struct graph_t* g, struct service_t* s, struct map_nodes_t* mapNodes) {
 	g_assert(pred);
 	g_assert(g);
 	g_assert(s);
@@ -137,7 +71,7 @@ gint computation(struct pred_t* pred, struct graph_t* g, struct service_t* s) {
 	}
 
 	// Compute the shortest path
-	dijkstra(srcMapIndex, dstMapIndex, g, s);
+	dijkstra(srcMapIndex, dstMapIndex, g, s, mapNodes, NULL, NULL, 0x00000000);
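+	// refactored dijkstra() takes mapNodes explicitly; the NULLs and zeroed flags presumably disable the KSP spur-path and optimization extensions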
 
 	// Check that a feasible solution in term of latency and bandwidth is found
 	gint map_dstIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
@@ -179,7 +113,7 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
 	g_assert(g);
 
 	// create map of devices / nodes to handle the path computation using the context
-	mapNodes = create_map_node();
+	struct map_nodes_t *mapNodes = create_map_node();
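+	// mapNodes is now local to this computation (the former file-scope global was removed)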
 	build_map_node(mapNodes, g);
 
 	// predecessors to store the computed path    
@@ -189,7 +123,7 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
 	struct service_endpoints_id_t* eEp = &(s->service_endpoints_id[1]);
 
 	// SP computation
-	gint done = computation(predecessors, g, s);
+	gint done = computation(predecessors, g, s, mapNodes);
 	if (done == -1) {
 		DEBUG_PC("NO PATH FOUND %s[%s] ---> %s[%s]", iEp->device_uuid, iEp->endpoint_uuid, eEp->device_uuid, eEp->endpoint_uuid);
 		comp_route_connection_issue_handler(path, s);
@@ -204,11 +138,8 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
 	//DEBUG_PC ("Path is constructed");
 
 	gint indexDest = get_map_index_by_nodeId(eEp->device_uuid, mapNodes);
-	struct map_t* dst_map = &mapNodes->map[indexDest];
-	// Get the delay and cost
-	memcpy(&p->cost, &dst_map->distance, sizeof(gdouble));
-	memcpy(&p->availCap, &dst_map->avaiBandwidth, sizeof(dst_map->avaiBandwidth));
-	memcpy(&p->delay, &dst_map->latency, sizeof(mapNodes->map[indexDest].latency));
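+	// set_path_attributes() condenses the former per-field memcpy calls (cost, availCap, delay; presumably power as well)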
+	struct map_t* dst_map = &mapNodes->map[indexDest]; 	
+	set_path_attributes(p, dst_map);
 	DEBUG_PC("Computed Path Avail Bw: %f, Path Cost: %f, latency: %f", p->availCap, p->cost, p->delay);
 	print_path(p);
 
@@ -239,10 +170,8 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
 		g_free(mapNodes);
 		return;
 	}
-
 	DEBUG_PC("SP FAILED!!!");
 	comp_route_connection_issue_handler(path, s);
-
 	return;
 }
 
@@ -257,16 +186,21 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
 *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
-void sp_execution_services(struct compRouteOutputList_t* oPathList)
-{
-	g_assert(oPathList);
-	g_assert(contextSet);
-	g_assert(serviceList);
+void sp_execution_services(struct compRouteOutputList_t* oPathList) {
+	g_assert(oPathList);	
+	// Check that there is at least one service to be processed
+	if (g_list_length(serviceList) == 0) {
+		DEBUG_PC("Lengtg requested serviceList is Empty...");
+		return;
+	}
 
 	DEBUG_PC("----- Starting the SP Computation ------");
-
-	for (gint i = 0; i < serviceList->numServiceList; i++) {
-		 struct service_t* service = &(serviceList->services[i]);
+	gint i = 0;
+	for (GList* listnode = g_list_first(serviceList);
+		listnode;
+		listnode = g_list_next(listnode), i++) {
+		//struct service_t* service = &(serviceList->services[i]);
+		struct service_t* service = (struct service_t*)(listnode->data);
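+		// as in the KSP variant, i tracks the output slot while walking the service GList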
 
 		 DEBUG_PC("Starting the Computation for ServiceId: %s [ContextId: %s]", service->serviceId.service_uuid, service->serviceId.contextId);
 		 struct compRouteOutput_t* pathService = &(oPathList->compRouteConnection[i]);
@@ -296,8 +230,8 @@ void sp_execution_services(struct compRouteOutputList_t* oPathList)
 			 continue;
 		 }
 		 struct path_t* path = &(pathService->paths[pathService->numPaths - 1]);
-		 allocate_graph_resources(path, service, g);
-		 allocate_graph_reverse_resources(path, service, g);
+		 //allocate_graph_resources(path, service, g);			// LGR: crashes in some cases with asymmetric topos
+		 //allocate_graph_reverse_resources(path, service, g);	// LGR: crashes in some cases with asymmetric topos
 		 print_graph(g);
 	}
 	return;
@@ -314,10 +248,8 @@ void sp_execution_services(struct compRouteOutputList_t* oPathList)
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-gint pathComp_sp_alg(struct compRouteOutputList_t* routeConnList)
-{
+gint pathComp_sp_alg(struct compRouteOutputList_t* routeConnList) {
 	g_assert(routeConnList);
-
 	gint numSuccesPathComp = 0, numPathCompIntents = 0;
 
 	DEBUG_PC("================================================================");
@@ -332,9 +264,9 @@ gint pathComp_sp_alg(struct compRouteOutputList_t* routeConnList)
 	gettimeofday(&t0, NULL);
 
 	// Allocate memory for the context
-	contextSet = create_contextSet();
+	contextSet = NULL;
 	// Build up the contextSet (>= 1)
-	build_contextSet(contextSet);
+	build_contextSet(&contextSet);
 	print_contextSet(contextSet);
 #if 1	
 	//Triggering the path computation for each specific network connectivity service
@@ -352,7 +284,7 @@ gint pathComp_sp_alg(struct compRouteOutputList_t* routeConnList)
 	print_path_connection_list(routeConnList);
 #endif
 
-	g_free(contextSet);
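+	// free the list and its contexts in one pass; g_steal_pointer() nulls the global first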
+	g_list_free_full(g_steal_pointer(&contextSet), (GDestroyNotify)destroy_context);
 	return http_code;
 }
 
diff --git a/src/pathcomp/backend/pathComp_tools.c b/src/pathcomp/backend/pathComp_tools.c
index 5f1748b1a58a0d1b935c064ef4b92ac8ee0da389..e7b91ee9e5a8a0a1c28344d17247e307238ed4c7 100644
--- a/src/pathcomp/backend/pathComp_tools.c
+++ b/src/pathcomp/backend/pathComp_tools.c
@@ -59,7 +59,6 @@ struct timeval tv_adjust (struct timeval a) {
 		a.tv_usec -= 1000000;
 		a.tv_sec++;
 	}
-
 	while (a.tv_usec < 0) {
 		a.tv_usec += 1000000;
 		a.tv_sec--;
@@ -80,8 +79,7 @@ struct timeval tv_adjust (struct timeval a) {
  */
  ////////////////////////////////////////////////////////////////////////////////////////
 void duplicate_string(gchar* dst, gchar* src) {
-	g_assert(dst);
-	g_assert(src);
+	g_assert(dst); g_assert(src);
 	strcpy(dst, src);
 	dst[strlen(dst)] = '\0';
 	return;
@@ -99,16 +97,15 @@ void duplicate_string(gchar* dst, gchar* src) {
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void print_path (struct compRouteOutputItem_t *p) {
-	g_assert(p);
-	
+	g_assert(p);	
 	DEBUG_PC ("=========== COMPUTED PATH =======================");
-	DEBUG_PC ("Path Avail. Bw: %f, E2E Path Latency: %f, Path Cost: %f", p->availCap, p->delay, p->cost);
+	DEBUG_PC ("E2E Avail. Bw: %f, Latency: %f, Cost: %f, Consumed Power (in W): %f", p->availCap, p->delay, p->cost, p->power);
 	for (gint k = 0; k < p->numRouteElements; k++) {
-		DEBUG_PC ("aNodeId: %s (%s) --> zNodeId: %s (%s)", p->routeElement[k].aNodeId.nodeId, p->routeElement[k].aEndPointId,
+		DEBUG_PC ("%s[%s] --> %s[%s]", p->routeElement[k].aNodeId.nodeId, p->routeElement[k].aEndPointId,
 																p->routeElement[k].zNodeId.nodeId, p->routeElement[k].zEndPointId);
-		DEBUG_PC("linkId: %s", p->routeElement[k].linkId);
-		DEBUG_PC("aTopologyId: %s", p->routeElement[k].aTopologyId);
-		DEBUG_PC("zTopologyId: %s", p->routeElement[k].zTopologyId);
+		DEBUG_PC("\t linkId: %s", p->routeElement[k].linkId);
+		DEBUG_PC("\t aTopologyId: %s", p->routeElement[k].aTopologyId);
+		DEBUG_PC("\t zTopologyId: %s", p->routeElement[k].zTopologyId);
 	}
 	DEBUG_PC ("==================================================================");		
 	return;
@@ -128,8 +125,8 @@ void print_path (struct compRouteOutputItem_t *p) {
 void print_path_t(struct path_t* p) {
 	g_assert(p);
 	DEBUG_PC(" ============ COMPUTED OUTPUT PATH =================");
-	DEBUG_PC("Path Avail Capacity: %f, Cost: %f, Latency: %f", p->path_capacity.value,
-			p->path_cost.cost_value, p->path_latency.fixed_latency);
+	DEBUG_PC("Path AvailBw: %f, Cost: %f, Latency: %f, Power: %f", p->path_capacity.value,
+			p->path_cost.cost_value, p->path_latency.fixed_latency, p->path_power.power);
 	DEBUG_PC("number of links of path %d", p->numPathLinks);
 	for (gint k = 0; k < p->numPathLinks; k++) {
 		DEBUG_PC("Link: %s", p->pathLinks[k].linkId);
@@ -144,6 +141,25 @@ void print_path_t(struct path_t* p) {
 	return;
 }
 
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Function used to allocate memory for struct path_t
+ *
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ ////////////////////////////////////////////////////////////////////////////////////////
+struct path_t* create_path() {
+	struct path_t* p = g_malloc0(sizeof(struct path_t));
+	if (p == NULL) {
+		DEBUG_PC("Memory allocation failure");
+		exit(-1);
+	}
+	return(p);
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
@@ -178,12 +194,9 @@ gchar* get_uuid_char(uuid_t uuid) {
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void copy_service_id(struct serviceId_t* o, struct serviceId_t* i) {
-	g_assert(o);
-	g_assert(i);
-
+	g_assert(o); g_assert(i);
 	memcpy(o->contextId, i->contextId, sizeof(i->contextId));
 	memcpy(o->service_uuid, i->service_uuid, sizeof(i->service_uuid));
-
 	return;
 }
 
@@ -200,8 +213,7 @@ void copy_service_id(struct serviceId_t* o, struct serviceId_t* i) {
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void copy_service_endpoint_id(struct service_endpoints_id_t* oEp, struct service_endpoints_id_t* iEp) {
-	g_assert(oEp);
-	g_assert(iEp);
+	g_assert(oEp); g_assert(iEp);
 
 	// copy topology information
 	memcpy(oEp->topology_id.contextId, iEp->topology_id.contextId, sizeof(iEp->topology_id.contextId));
@@ -216,8 +228,8 @@ void copy_service_endpoint_id(struct service_endpoints_id_t* oEp, struct service
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
- * 	@brief From the set of contexts, it is returned the graph associated to that contexct matching
- * with the passed contextId
+ * 	@brief From the set of contexts, returns the graph associated with the context matching
+ *	the passed contextId.
  *
  *	@param Set
  *  @param contextId
@@ -226,15 +238,16 @@ void copy_service_endpoint_id(struct service_endpoints_id_t* oEp, struct service
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct graph_t* get_graph_by_contextId(struct contextSet_t* Set, gchar* contextId) {
-	g_assert(Set);
+struct graph_t* get_graph_by_contextId(GList* set, gchar* contextId) {
 	g_assert(contextId);
 
 	// iterate over the set of context. Pick the one matching with contextId, and return the graph.
 	// If not found, return NULL
 	struct graph_t* g = NULL;
-	for (gint i = 0; i < Set->num_context_set; i++) {
-		struct context_t* context = &(Set->contextList[i]);
+	for (GList *ln = g_list_first(set);
+		ln;
+		ln = g_list_next(ln)){
+		struct context_t* context = (struct context_t*)(ln->data);
 		if (strcmp(context->contextId, contextId) == 0) {
 			g = &(context->g);
 			return g;
@@ -297,16 +310,13 @@ struct path_constraints_t * get_path_constraints(struct service_t* s) {
  * 	@file pathComp_tools.c
  * 	@brief Creates the predecessors to keep the computed path
  *
- * 
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-struct pred_t * create_predecessors ()
-{
+struct pred_t * create_predecessors () {
 	struct pred_t *predecessors = g_malloc0 (sizeof (struct pred_t));
-	if (predecessors == NULL)
-	{
+	if (predecessors == NULL) {
 		DEBUG_PC ("memory allocation failed\n");
 		exit (-1);
 	}   
@@ -323,11 +333,9 @@ struct pred_t * create_predecessors ()
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct edges_t* create_edge()
-{
+struct edges_t* create_edge() {
 	struct edges_t* e = g_malloc0(sizeof(struct edges_t));
-	if (e == NULL)
-	{
+	if (e == NULL) {
 		DEBUG_PC("Memory allocation failed\n");
 		exit(-1);
 	}
@@ -376,16 +384,13 @@ void print_predecessors (struct pred_t *p)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void build_predecessors (struct pred_t *p, struct service_t *s, struct map_nodes_t *map)
-{
-	g_assert (p);
-	g_assert (s);
-	g_assert (map);
+void build_predecessors (struct pred_t *p, struct service_t *s, struct map_nodes_t *map) {
+	g_assert (p); g_assert (s); g_assert (map);
 	
 	struct nodes_t *v = create_node();
 	duplicate_string(v->nodeId, s->service_endpoints_id[1].device_uuid);
 	
-	struct edges_t *e = create_edge ();	
+	struct edges_t *e = create_edge();	
 	get_edge_from_map_by_node (e, v, map);
 			
 	// Get u (being source of edge e)
@@ -416,9 +421,7 @@ void build_predecessors (struct pred_t *p, struct service_t *s, struct map_nodes
 		p->numPredComp++;		
 	}
 	print_predecessors (p);
-    g_free (e);
-	g_free(v);
-	g_free(srcNode);
+    g_free (e); g_free(v); g_free(srcNode);
 	return;
 }
 
@@ -452,11 +455,9 @@ struct nodes_t * create_node ()
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-struct routeElement_t * create_routeElement ()
-{
+struct routeElement_t * create_routeElement () {
 	struct routeElement_t *rE = g_malloc0 (sizeof (struct routeElement_t));
-	if (rE == NULL)
-	{
+	if (rE == NULL)	{
 		DEBUG_PC ("memory allocation problem");
 		exit (-1);		
 	}
@@ -475,11 +476,9 @@ struct routeElement_t * create_routeElement ()
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void duplicate_node_id (struct nodes_t *src, struct nodes_t *dst)
-{	
+void duplicate_node_id (struct nodes_t *src, struct nodes_t *dst) {	
 	g_assert (src);
-	g_assert (dst);
-	
+	g_assert (dst);	
 	//DEBUG_PC ("Duplicate nodeId for %s", src->nodeId);	
 	strcpy (dst->nodeId, src->nodeId);	
 	return;
@@ -497,8 +496,7 @@ void duplicate_node_id (struct nodes_t *src, struct nodes_t *dst)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gint compare_node_id (struct nodes_t *a, struct nodes_t *b)
-{
+gint compare_node_id (struct nodes_t *a, struct nodes_t *b) {
 	g_assert (a);
 	g_assert (b);	
 	return (memcmp (&a->nodeId, b->nodeId, strlen (b->nodeId)));	
@@ -541,8 +539,7 @@ void duplicate_routeElement (struct routeElement_t *src, struct routeElement_t *
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void duplicate_edge (struct edges_t *e1, struct edges_t *e2) {
-	g_assert (e1);
-	g_assert (e2);
+	g_assert (e1); g_assert (e2);
 		
 	duplicate_node_id (&e2->aNodeId, &e1->aNodeId);
 	duplicate_node_id (&e2->zNodeId, &e1->zNodeId);
@@ -560,7 +557,8 @@ void duplicate_edge (struct edges_t *e1, struct edges_t *e2) {
 	memcpy(&e1->availCap, &e2->availCap, sizeof(gdouble));
 
 	memcpy (&e1->cost, &e2->cost, sizeof (gdouble));
-    memcpy (&e1->delay, &e2->delay, sizeof (gdouble));	
+    memcpy (&e1->delay, &e2->delay, sizeof (gdouble));
+	memcpy(&e1->energy, &e2->energy, sizeof(gdouble));
 	return;
 }
 
@@ -577,19 +575,18 @@ void duplicate_edge (struct edges_t *e1, struct edges_t *e2) {
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void duplicate_path (struct compRouteOutputItem_t *a, struct compRouteOutputItem_t *b) {		
-	g_assert (a);
-	g_assert (b);
-	memcpy (&b->availCap, &a->availCap, sizeof (gdouble));
-	b->numRouteElements = a->numRouteElements;	
+	g_assert (a); 	g_assert (b);
+	memcpy(&b->availCap, &a->availCap, sizeof (gdouble));		
 	memcpy(&b->cost, &a->cost, sizeof(gdouble));	
-	memcpy (&b->delay, &a->delay, sizeof (gdouble));
+	memcpy(&b->delay, &a->delay, sizeof (gdouble));
+	memcpy(&b->power, &a->power, sizeof(gdouble));
+	b->numRouteElements = a->numRouteElements;
 	for (gint k = 0; k < a->numRouteElements; k++) {			
 		//DEBUG_PC ("aNodeId: %s // zNodeId: %s", a->routeElement[k].aNodeId.nodeId, a->routeElement[k].zNodeId.nodeId);
 		// aNodeId duplication
 		struct nodes_t *n1 = &(a->routeElement[k].aNodeId);
 		struct nodes_t *n2 = &(b->routeElement[k].aNodeId);			
-		duplicate_node_id (n1, n2);			
-					
+		duplicate_node_id (n1, n2);					
 		//zNodeId duplication
 		n1 = &(a->routeElement[k].zNodeId);
 		n2 = &(b->routeElement[k].zNodeId);			
@@ -615,14 +612,14 @@ void duplicate_path (struct compRouteOutputItem_t *a, struct compRouteOutputItem
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void duplicate_path_t(struct compRouteOutputItem_t* a, struct path_t* b)
-{
-	g_assert(a);
-	g_assert(b);
+void duplicate_path_t(struct compRouteOutputItem_t* a, struct path_t* b) {
+	g_assert(a); g_assert(b);
 
+	// transfer path characteristics ...
 	memcpy(&b->path_capacity.value, &a->availCap, sizeof(gdouble));
 	memcpy(&b->path_cost.cost_value, &a->cost, sizeof(gdouble));
 	memcpy(&b->path_latency.fixed_latency, &a->delay, sizeof(gdouble));
+	memcpy(&b->path_power.power, &a->power, sizeof(gdouble));
 
 	b->numPathLinks = a->numRouteElements;
 
@@ -661,23 +658,17 @@ void duplicate_path_t(struct compRouteOutputItem_t* a, struct path_t* b)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gint get_map_index_by_nodeId (gchar *nodeId, struct map_nodes_t * mapN)
-{
-    gint index = -1;
-    gint i = 0;
-    
-    for (i = 0; i < mapN->numMapNodes; i++)
-    {
+gint get_map_index_by_nodeId (gchar *nodeId, struct map_nodes_t * mapN) {
+    gint i = 0;    
+    for (i = 0; i < mapN->numMapNodes; i++) {
 		//DEBUG_PC ("i: %d; current: %s // targeted: %s", i, mapN->map[i].verticeId.nodeId, nodeId);
-        if (memcmp (mapN->map[i].verticeId.nodeId, nodeId, strlen (nodeId)) == 0)
-        {
-            index = i;
-			//DEBUG_PC ("Index: %d", index);
-            return index;            
+        if (memcmp (mapN->map[i].verticeId.nodeId, nodeId, strlen (nodeId)) == 0) {
+			//DEBUG_PC ("Index: %d", i);
+			return i;            
         }
     }
 	//DEBUG_PC ("Index: %d", index);
-    return index;
+    return -1;
 }
 
 ////////////////////////////////////////////////////////////////////////////////////////
@@ -693,14 +684,11 @@ gint get_map_index_by_nodeId (gchar *nodeId, struct map_nodes_t * mapN)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void get_edge_from_map_by_node (struct edges_t *e, struct nodes_t* v, struct map_nodes_t *mapN) {
-	
+void get_edge_from_map_by_node (struct edges_t *e, struct nodes_t* v, struct map_nodes_t *mapN) {	
 	//DEBUG_PC ("Get the Edge into map from node v: %s", v.nodeId);	
 	// Get the edge reaching the node v from mapNodes
-	gint map_vIndex = get_map_index_by_nodeId (v->nodeId, mapN);
-	
-	//DEBUG_PC ("aNodeId: %s --> zNodeId: %s", mapN->map[map_vIndex].predecessor.aNodeId.nodeId, mapN->map[map_vIndex].predecessor.zNodeId.nodeId);
-	
+	gint map_vIndex = get_map_index_by_nodeId (v->nodeId, mapN);	
+	//DEBUG_PC ("aNodeId: %s --> zNodeId: %s", mapN->map[map_vIndex].predecessor.aNodeId.nodeId, mapN->map[map_vIndex].predecessor.zNodeId.nodeId);	
 	struct edges_t *te = &(mapN->map[map_vIndex].predecessor);	
 	duplicate_edge (e, te);
 	return;
@@ -721,7 +709,6 @@ void get_edge_from_map_by_node (struct edges_t *e, struct nodes_t* v, struct map
 /////////////////////////////////////////////////////////////////////////////////////////
 void get_edge_from_predecessors (struct edges_t *e, struct nodes_t* n, struct pred_t *predecessors) {
 	g_assert(predecessors);
-
 	DEBUG_PC ("Get edge outgoing node %s from predecessors list", n->nodeId);
 	//print_predecessors (predecessors);
 	for (gint i = 0; i < predecessors->numPredComp; i++) {
@@ -751,14 +738,13 @@ void get_edge_from_predecessors (struct edges_t *e, struct nodes_t* n, struct pr
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void build_path (struct compRouteOutputItem_t *p, struct pred_t *predecessors, struct service_t *s) {
-	//DEBUG_PC ("\n");
 	// Get the source device Id	of the network connectivity service
 	struct nodes_t *v = create_node();
+	// v is set to the Src Node of the Service
 	duplicate_string(v->nodeId, s->service_endpoints_id[0].device_uuid);
-
-	struct edges_t* e = create_edge();
-							  	
+								  	
 	// Get the edge for v in predecessors
+	struct edges_t* e = create_edge();
 	get_edge_from_predecessors (e, v, predecessors);	
 	// Get the target for e
 	struct nodes_t u;	
@@ -778,14 +764,12 @@ void build_path (struct compRouteOutputItem_t *p, struct pred_t *predecessors, s
 	duplicate_string(p->routeElement[k].contextId, s->serviceId.contextId);
 	p->numRouteElements++;
 
-	// Get the destination device Id of the network connectivity service
+	// Get Dst Node of connectivity service
 	struct nodes_t* dst = create_node();
 	duplicate_string(dst->nodeId, s->service_endpoints_id[1].device_uuid);
-	while (compare_node_id (&u, dst) != 0)	
-	{
+	while (compare_node_id (&u, dst) != 0) {
 		k++; 
-		p->numRouteElements++;
-		// v = u		
+		p->numRouteElements++;			
 		duplicate_node_id (&u, v);
 		get_edge_from_predecessors (e, v, predecessors);
 		// Get the target u		
@@ -798,10 +782,7 @@ void build_path (struct compRouteOutputItem_t *p, struct pred_t *predecessors, s
 		duplicate_string(p->routeElement[k].linkId, e->linkId);
 		duplicate_string(p->routeElement[k].aTopologyId, e->aTopologyId);
 		duplicate_string(p->routeElement[k].zTopologyId, e->zTopologyId);
-		duplicate_string(p->routeElement[k].contextId, s->serviceId.contextId);
-
-		// copy the contextId
-		//duplicate_string(p->routeElement[k].contextId, s->service_endpoints_id[0].topology_id.contextId);
+		duplicate_string(p->routeElement[k].contextId, s->serviceId.contextId);		
 	}		
 	g_free(e); g_free(v); g_free(pathCons);
 	//DEBUG_PC ("Path is constructed");	
@@ -819,22 +800,19 @@ void build_path (struct compRouteOutputItem_t *p, struct pred_t *predecessors, s
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void print_graph (struct graph_t *g)
-{	     
+void print_graph (struct graph_t *g) {
+	g_assert(g);
     DEBUG_PC ("================================================================");
     DEBUG_PC ("===========================   GRAPH   ==========================");
     DEBUG_PC ("================================================================");
-
-	DEBUG_PC("Graph Num Vertices: %d", g->numVertices);
+	DEBUG_PC("Graph Num Vertices: %d", g->numVertices);    
     
-    gint i = 0, j = 0, k = 0;
-    for (i = 0; i < g->numVertices; i++)
-    {
+    for (gint i = 0; i < g->numVertices; i++) {
         DEBUG_PC ("Head Vertice [%s]", g->vertices[i].verticeId.nodeId);
-        for (j = 0; j < g->vertices[i].numTargetedVertices; j++)
+        for (gint j = 0; j < g->vertices[i].numTargetedVertices; j++)
         {
             DEBUG_PC ("  Tail Vertice: %s", g->vertices[i].targetedVertices[j].tVertice.nodeId);
-            for (k = 0; k < g->vertices[i].targetedVertices[j].numEdges; k++)
+            for (gint k = 0; k < g->vertices[i].targetedVertices[j].numEdges; k++)
             {
                 struct edges_t *e = &(g->vertices[i].targetedVertices[j].edges[k]);
 				DEBUG_PC ("%s(%s) --> %s(%s) [C: %f, Bw: %f b/s, Delay: %f ms]", e->aNodeId.nodeId, e->aEndPointId, e->zNodeId.nodeId, 
@@ -992,8 +970,7 @@ gint graph_targeted_vertice_add (gint vIndex, gchar *nodeId, struct graph_t *g)
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
-void remove_edge_from_graph (struct graph_t *g, struct edges_t *e)
-{
+void remove_edge_from_graph (struct graph_t *g, struct edges_t *e) {
 	// Find the ingress vertice into the graph
 	DEBUG_PC ("Removing from Graph %s[%s]) ---> %s[%s] (linkId: %s)", e->aNodeId.nodeId, e->aEndPointId, e->zNodeId.nodeId, e->aEndPointId, e->linkId);
 	gint verticeIndex = -1;		
@@ -1009,14 +986,13 @@ void remove_edge_from_graph (struct graph_t *g, struct edges_t *e)
 	if (targetedVerticeIndex == -1)	{
 		DEBUG_PC ("%s --> %s NOT in the Graph!!", e->aNodeId.nodeId, e->zNodeId.nodeId);
 		return;
-	}
-	
+	}	
 	//DEBUG_PC ("%s --> %s found in the Graph", e->aNodeId.nodeId, e->zNodeId.nodeId);
 	
 	// Get the edge position
 	gint edgeIndex = -1;
 	edgeIndex = graph_edge_lookup (verticeIndex, targetedVerticeIndex, e, g);
-	if (edgeIndex == -1) 	{
+	if (edgeIndex == -1) {
 		DEBUG_PC ("%s --> %s NOT in the Graph!!", e->aNodeId.nodeId, e->zNodeId.nodeId);
 		return;
 	}
@@ -1046,11 +1022,9 @@ void remove_edge_from_graph (struct graph_t *g, struct edges_t *e)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-struct path_set_t * create_path_set ()
-{
+struct path_set_t * create_path_set () {
 	struct path_set_t * p = g_malloc0 (sizeof (struct path_set_t));
-	if (p == NULL)
-	{
+	if (p == NULL) {
 		DEBUG_PC ("Memory allocation problem");
 		exit (-1);		
 	}
@@ -1068,10 +1042,8 @@ struct path_set_t * create_path_set ()
  *	@date 2021
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void remove_path_set(struct path_set_t* p)
-{
-	g_assert(p);
-	g_free(p);
+void remove_path_set(struct path_set_t* p) {
+	g_assert(p); g_free(p);
 	return;
 }
 
@@ -1087,15 +1059,14 @@ void remove_path_set(struct path_set_t* p)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void build_map_node (struct map_nodes_t *mapN, struct graph_t *g)
-{
+void build_map_node (struct map_nodes_t *mapN, struct graph_t *g) {
 	//DEBUG_PC ("Construction of the Map of Nodes");               
-    for (gint i = 0; i < g->numVertices; i++)
-    {	
+    for (gint i = 0; i < g->numVertices; i++) {	
 		duplicate_node_id (&g->vertices[i].verticeId, &mapN->map[i].verticeId);
         mapN->map[i].distance = INFINITY_COST;
         mapN->map[i].avaiBandwidth = 0.0;
         mapN->map[i].latency = INFINITY_COST;
+		mapN->map[i].power = INFINITY_COST;
         mapN->numMapNodes++;
     }
     //DEBUG_PC ("mapNodes formed by %d Nodes", mapN->numMapNodes);
@@ -1107,22 +1078,137 @@ void build_map_node (struct map_nodes_t *mapN, struct graph_t *g)
  * 	@file pathComp_tools.c
  * 	@brief Allocate memory for path of struct compRouteOutputList_t *
  *
- *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-struct compRouteOutputList_t * create_route_list ()
-{
+struct compRouteOutputList_t * create_route_list () {
 	struct compRouteOutputList_t *p = g_malloc0 (sizeof (struct compRouteOutputList_t));
-	if (p == NULL)
-	{
+	if (p == NULL) {
 		DEBUG_PC ("Memory Allocation Problem");
 		exit (-1);
 	}
 	return p;
 }
 
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Copy all the attributes defining a path
+ *
+ * @param dst_path
+ * @param src_path
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void copy_path(struct path_t* dst_path, struct path_t* src_path) {
+	g_assert(dst_path);
+	g_assert(src_path);
+
+	// Path capacity
+	dst_path->path_capacity.unit = src_path->path_capacity.unit;
+	memcpy(&dst_path->path_capacity.value, &src_path->path_capacity.value, sizeof(gdouble));
+
+	// Path latency
+	memcpy(&dst_path->path_latency.fixed_latency, &src_path->path_latency.fixed_latency, sizeof(gdouble));
+
+	// Path cost
+	duplicate_string(dst_path->path_cost.cost_name, src_path->path_cost.cost_name);
+	memcpy(&dst_path->path_cost.cost_value, &src_path->path_cost.cost_value, sizeof(gdouble));
+	memcpy(&dst_path->path_cost.cost_algorithm, &src_path->path_cost.cost_algorithm, sizeof(gdouble));
+
+	// Path links
+	dst_path->numPathLinks = src_path->numPathLinks;
+	for (gint i = 0; i < dst_path->numPathLinks; i++) {
+		struct pathLink_t* dPathLink = &(dst_path->pathLinks[i]);
+		struct pathLink_t* sPathLink = &(src_path->pathLinks[i]);
+
+		duplicate_string(dPathLink->linkId, sPathLink->linkId);
+		duplicate_string(dPathLink->aDeviceId, sPathLink->aDeviceId);
+		duplicate_string(dPathLink->zDeviceId, sPathLink->zDeviceId);
+		duplicate_string(dPathLink->aEndPointId, sPathLink->aEndPointId);
+		duplicate_string(dPathLink->zEndPointId, sPathLink->zEndPointId);
+
+		duplicate_string(dPathLink->topologyId.contextId, sPathLink->topologyId.contextId);
+		duplicate_string(dPathLink->topologyId.topology_uuid, sPathLink->topologyId.topology_uuid);
+
+		dPathLink->numLinkTopologies = sPathLink->numLinkTopologies;
+		for (gint j = 0; j < dPathLink->numLinkTopologies; j++) {
+			struct linkTopology_t* dLinkTop = &(dPathLink->linkTopologies[j]);
+			struct linkTopology_t* sLinkTop = &(sPathLink->linkTopologies[j]);
+
+			duplicate_string(dLinkTop->topologyId, sLinkTop->topologyId);
+		}
+	}
+	return;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Duplicate the route output instance
+ *
+ * @param dst_ro
+ * @param src_ro
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void duplicate_compRouteOuput(struct compRouteOutput_t* dst_ro, struct compRouteOutput_t* src_ro) {
+	g_assert(dst_ro); g_assert(src_ro); 
+		
+	// Copy the serviceId
+	copy_service_id(&dst_ro->serviceId, &src_ro->serviceId);
+	dst_ro->num_service_endpoints_id = src_ro->num_service_endpoints_id;
+
+	for (gint j = 0; j < dst_ro->num_service_endpoints_id; j++) {
+		struct service_endpoints_id_t* iEp = &(src_ro->service_endpoints_id[j]);
+		struct service_endpoints_id_t* oEp = &(dst_ro->service_endpoints_id[j]);
+		copy_service_endpoint_id(oEp, iEp);
+	}
+
+	// Copy paths
+	dst_ro->numPaths = src_ro->numPaths;
+	for (gint j = 0; j < dst_ro->numPaths; j++) {
+		struct path_t* dst_path = &(dst_ro->paths[j]);
+		struct path_t* src_path = &(src_ro->paths[j]);
+		copy_path(dst_path, src_path);
+	}
+	// copy no path issue value
+	dst_ro->noPathIssue = src_ro->noPathIssue;
+	return;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Duplicate the computation route output list
+ * 
+ * @param dst
+ * @param src
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void duplicate_route_list(struct compRouteOutputList_t* dst, struct compRouteOutputList_t* src) {
+	g_assert(src); g_assert(dst);
+
+	dst->numCompRouteConnList = src->numCompRouteConnList;
+	dst->compRouteOK = src->compRouteOK;
+	memcpy(&dst->compRouteConnAvBandwidth, &src->compRouteConnAvBandwidth, sizeof(gdouble));
+	memcpy(&dst->compRouteConnAvPathLength, &src->compRouteConnAvPathLength, sizeof(gdouble));
+	for (gint i = 0; i < src->numCompRouteConnList; i++) {
+		struct compRouteOutput_t* src_ro = &(src->compRouteConnection[i]);
+		struct compRouteOutput_t* dst_ro = &(dst->compRouteConnection[i]);
+		duplicate_compRouteOuput(dst_ro, src_ro);
+	}	
+	return;
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
@@ -1133,8 +1219,7 @@ struct compRouteOutputList_t * create_route_list ()
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-struct compRouteOutputItem_t *create_path_item ()
-{
+struct compRouteOutputItem_t *create_path_item () {
 	struct compRouteOutputItem_t *p = g_malloc0 (sizeof (struct compRouteOutputItem_t));
 	if (p == NULL) 	{
 		DEBUG_PC ("Memory Allocation Problem");
@@ -1146,71 +1231,86 @@ struct compRouteOutputItem_t *create_path_item ()
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
- * 	@brief Sort the set of paths according to the metric (1st criteria) and latency (2nd criteria)
+ * 	@brief Sort the set of paths by Cost, Power (optional), Latency and AvailBw
  *
  *	@params setP
+ *  @params args
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void sort_path_set(struct path_set_t* setP) {
+void sort_path_set(struct path_set_t* setP, guint args) {
 	g_assert(setP);
-	// Sort the paths contained in setP by shotest metric and latency	
-	float epsilon = 0.0000001;
+	// Sort the paths contained in setP by:
+	// 1st Criteria: The path cost (maybe bound to link distance)
+	// 2nd Criteria: The consumed path power (applies only when ENERGY_EFFICIENT_ARGUMENT is set)
+	// 3rd Criteria: The path latency
+	// 4th Criteria: The available Bw
+	float epsilon = 0.1;
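+	// plain bubble sort over the (typically small) path set; each swap goes through a temporary path item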
 
 	for (gint i = 0; i < setP->numPaths; i++) {
 		for (gint j = 0; j < (setP->numPaths - i - 1); j++)	{
 			struct compRouteOutputItem_t* path1 = &setP->paths[j];
-			struct compRouteOutputItem_t* path2 = &setP->paths[j + 1];
-			
+			struct compRouteOutputItem_t* path2 = &setP->paths[j + 1];			
 			struct compRouteOutputItem_t* pathTmp = create_path_item();
-			// 1st Criteria (avail Bw)
-			if ((path2->availCap - path1->availCap > 0.0) && (fabs(path1->availCap - path2->availCap) > epsilon)) {
+			//////////////////////// Criterias ////////////////////////////////////////
+			// 1st Criteria (Cost)
+			if (path2->cost < path1->cost) {
 				duplicate_path(path1, pathTmp);
 				duplicate_path(path2, path1);
 				duplicate_path(pathTmp, path2);
 				g_free(pathTmp);
 				continue;
 			}
-			else if ((path1->availCap - path2->availCap > 0.0) && (fabs(path1->availCap - path2->availCap) > epsilon)) {
-				g_free(pathTmp);
-				continue;
-			}
-			// likely the same available bw between path1 and path2
-			else if (fabs(path1->availCap - path2->availCap) < epsilon) {
-				// 2nd criteria: sort path cost
-				if (path1->cost > path2->cost) {
-					duplicate_path(path1, pathTmp);
-					duplicate_path(path2, path1);
-					duplicate_path(pathTmp, path2);
-					g_free(pathTmp);
-					continue;
-				}
-				else if (path1->cost < path2->cost) {
-					g_free(pathTmp);
-					continue;
-				}
-				// 3rd criteria: same path cost, prioritize the one with lowest e2e latency
-				else if (path1->cost == path2->cost) {
-					if ((path2->delay - path1->delay > 0.0) && (fabs(path1->delay - path2->delay) > epsilon)) {
+			if (path2->cost == path1->cost) {
+				// 2nd Criteria (Energy)
+				if (args & ENERGY_EFFICIENT_ARGUMENT) {
+					if (path2->power < path1->power) {
+						duplicate_path(path1, pathTmp);
+						duplicate_path(path2, path1);
+						duplicate_path(pathTmp, path2);
+						g_free(pathTmp);
+						continue;
+					}
+					else { // path1->power <= path2->power
 						g_free(pathTmp);
 						continue;
 					}
-					else if ((path1->delay - path2->delay > 0.0) && (fabs(path1->delay - path2->delay) > epsilon)) {
+				}
+				else { // No energy-efficient argument
+					// 3rd Criteria (latency)
+					if (path2->delay < path1->delay) {
 						duplicate_path(path1, pathTmp);
 						duplicate_path(path2, path1);
 						duplicate_path(pathTmp, path2);
 						g_free(pathTmp);
 						continue;
 					}
-					// Same bw, same cost and same latency, path1 and path2 are practically the same
-					else if (fabs(path1->delay - path2->delay) < epsilon) {
+					else if (path1->delay < path2->delay) {
 						g_free(pathTmp);
 						continue;
 					}
+					else { // path1->delay == path2->delay
+						// 4th Criteria (available bw)
+						if (path2->availCap > path1->availCap) {
+							duplicate_path(path1, pathTmp);
+							duplicate_path(path2, path1);
+							duplicate_path(pathTmp, path2);
+							g_free(pathTmp);
+							continue;
+						}
+						else {
+							g_free(pathTmp);
+							continue;
+						}
+					}
 				}
-			}			
+			}
+			else {	// path1->cost < path2->cost
+				g_free(pathTmp);
+				continue;
+			}				
 		}
 	}
 	return;
@@ -1249,8 +1349,7 @@ void pop_front_path_set (struct path_set_t *setP) {
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void add_routeElement_path_back (struct routeElement_t *rE, struct compRouteOutputItem_t *p)
-{
+void add_routeElement_path_back (struct routeElement_t *rE, struct compRouteOutputItem_t *p) {
 	//DEBUG_PC ("p->numRouteElements: %d", p->numRouteElements);
 	p->numRouteElements++;
 	gint index = p->numRouteElements - 1;
@@ -1268,7 +1367,6 @@ void add_routeElement_path_back (struct routeElement_t *rE, struct compRouteOutp
 	duplicate_string(p->routeElement[index].linkId, rE->linkId);
 	duplicate_string(p->routeElement[index].aTopologyId, rE->aTopologyId);
 	duplicate_string(p->routeElement[index].zTopologyId, rE->zTopologyId);
-
 	return;
 }
 
@@ -1332,21 +1430,19 @@ gboolean matching_path_rootPath (struct compRouteOutputItem_t *ap, struct compRo
 /////////////////////////////////////////////////////////////////////////////////////////
 void modify_targeted_graph (struct graph_t *g, struct path_set_t *A, struct compRouteOutputItem_t * rootPath, struct nodes_t * spurNode) {
 	//DEBUG_PC ("Modify the Targeted graph according to the Yen algorithm principles");
-	for (gint j = 0; j < A->numPaths; j++)
-	{
+	for (gint j = 0; j < A->numPaths; j++) {
 		struct compRouteOutputItem_t *ap = &A->paths[j];
-		struct edges_t *e = create_edge ();
+		struct edges_t *e = create_edge();
 		gboolean ret =  FALSE;
 		ret = matching_path_rootPath (ap, rootPath, spurNode, e);		
 		if (ret == TRUE) {
-			//DEBUG_PC ("Removal %s [%u]--> %s [%u] from the graph", e->aNodeId.nodeId, e->aLinkId, e->zNodeId.nodeId, e->zLinkId);
+			DEBUG_PC ("Removal %s[%s] --> %s[%s] from the graph", e->aNodeId.nodeId, e->aEndPointId, e->zNodeId.nodeId, e->aEndPointId);
 			remove_edge_from_graph (g, e);
 			//DEBUG_PC ("Print Resulting Graph");
-			//print_graph (g);
+			print_graph (g);
 			g_free (e);			
 		}
-		if (ret == FALSE)
-		{
+		if (ret == FALSE) {
 			g_free (e);
 			continue;
 		}						
@@ -1399,54 +1495,45 @@ gint find_nodeId (gconstpointer data, gconstpointer userdata)
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struct graph_t *g, 
-				struct service_t *s, GList **S, GList **Q, struct map_nodes_t *mapNodes) { 
-	g_assert(g);
-	g_assert(s);
-	g_assert(mapNodes);
+				struct service_t *s, GList **S, GList **Q, struct map_nodes_t *mapNodes, 
+				guint arg) { 
+	g_assert(g); g_assert(s); g_assert(mapNodes);
 
 	struct targetNodes_t *v = &(g->vertices[indexGraphU].targetedVertices[indexGraphV]);	
-    DEBUG_PC("Explored link (u ===> v):");
-	DEBUG_PC("u: %s ====>", u->node.nodeId);
-	DEBUG_PC("====> v: %s", v->tVertice.nodeId);
+    DEBUG_PC("Explored Link %s => %s)", u->node.nodeId, v->tVertice.nodeId);
+	//DEBUG_PC("\t %s => %s", u->node.nodeId, v->tVertice.nodeId);	
     
     // v already explored in S? then, discard it
     GList *found = g_list_find_custom (*S, v->tVertice.nodeId, find_nodeId);
     if (found != NULL) {
-        DEBUG_PC ("v (%s) in S, discard to explore it!", v->tVertice.nodeId);        
+        DEBUG_PC ("v (%s) in S, Discard", v->tVertice.nodeId);        
         return 0;
     }
 
 	// Get the set of constraints imposed by the service
 	struct path_constraints_t* path_constraints = get_path_constraints(s);
-    gdouble distance_through_u = INFINITY_COST;
-    gdouble latency_through_u = INFINITY_COST;	
-	gint i = 0;
-
-    // Check bandwidth requirement is fulfillied on edge u --> v    
-    gint foundAvailBw = 0;
-    gdouble edgeAvailBw = 0.0;
+    gdouble distance_through_u = INFINITY_COST, latency_through_u = INFINITY_COST, power_through_u = INFINITY_COST;
+	gint i = 0, foundAvailBw = 0;
+    // BANDWIDTH requirement to be fulfilled on EDGE u->v        
+    gdouble edgeAvailBw = 0.0, edgeTotalBw = 0.0;
     for (i = 0; i < v->numEdges; i++) {        
         struct edges_t *e = &(v->edges[i]);
 		memcpy (&edgeAvailBw, &(e->availCap), sizeof (gdouble));
-		DEBUG_PC("edge:u ===> v");
-        DEBUG_PC ("%s[%s] ===>", u->node.nodeId, e->aEndPointId);
-		DEBUG_PC("====> %s[%s]", v->tVertice.nodeId, e->zEndPointId);
-		DEBUG_PC("edge available bw: %f", edgeAvailBw);
-
-        // if network service constraint specifies "bandwidth" needs (assuming coherent units)
-		if (path_constraints->bw == TRUE) {
-			if (edgeAvailBw < path_constraints->bwConstraint) {
-				continue;
-			}
-			else {
-				foundAvailBw = 1;
-				break;
-			}
+		memcpy(&edgeTotalBw, &(e->totalCap), sizeof(gdouble));
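+		// totalCap - availCap yields the bandwidth already in use on the edge; the power model below builds on it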
+		DEBUG_PC("EDGE %s[%s] => %s[%s]", u->node.nodeId, e->aEndPointId, v->tVertice.nodeId, e->zEndPointId);
+        //DEBUG_PC ("\t %s[%s] =>", u->node.nodeId, e->aEndPointId);
+		//DEBUG_PC("\t => %s[%s]", v->tVertice.nodeId, e->zEndPointId);
+		DEBUG_PC("\t AvailBw: %f, TotalBw: %f", edgeAvailBw, edgeTotalBw);
+        // Check Service Bw constraint
+		if ((path_constraints->bw == TRUE) && (edgeAvailBw < path_constraints->bwConstraint))
+			continue;
+		else {
+			foundAvailBw = 1;
+			break;
 		}		
     }
-	// if bw path constraint is specified but not sastified ...	discard the edge
-    if ((path_constraints->bw == TRUE) && (foundAvailBw == 0))
-    {
+	// BW constraint NOT MET, then DISCARD edge
+    if ((path_constraints->bw == TRUE) && (foundAvailBw == 0)) {
         DEBUG_PC ("AvailBw: %f < path_constraint: %f -- Discard Edge", edgeAvailBw, path_constraints->bwConstraint);
 		g_free(path_constraints);
         return 0;    
@@ -1457,7 +1544,12 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
     gint map_uIndex = get_map_index_by_nodeId (u->node.nodeId, mapNodes);
 	struct map_t *u_map = &mapNodes->map[map_uIndex];
     distance_through_u = u_map->distance + v->edges[indexEdge].cost;
-    latency_through_u = u_map->latency + v->edges[indexEdge].delay;    
+    latency_through_u = u_map->latency + v->edges[indexEdge].delay;
+	// Consumed power at v through u is the sum
+	// 1. Power from src to u
+	// 2. Power-idle at node u
+	// 3. power consumed over the edge between u and v, i.e. energy*usedBw
+	power_through_u = u_map->power + g->vertices[indexGraphU].power_idle + ((edgeTotalBw - edgeAvailBw + path_constraints->bwConstraint) * (v->edges[indexEdge].energy));
     gdouble availBw_through_u = 0.0;
 
 	// ingress endpoint (u) is the src of the request
@@ -1476,7 +1568,7 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
 			memcpy (&availBw_through_u, &edgeAvailBw, sizeof (gdouble));
 		} 
     }     
-    // Relax the link according to the pathCost and latency
+    // Relax the link according to the pathCost, latency, and energy
     gint map_vIndex = get_map_index_by_nodeId (v->tVertice.nodeId, mapNodes);
 	struct map_t *v_map = &mapNodes->map[map_vIndex];
     // If cost dist (u, v) > dist (src, v) relax the link
@@ -1484,17 +1576,35 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
         //DEBUG_PC ("dist(src, u) + dist(u, v): %f > dist (src, v): %f --> Discard Link", distance_through_u, v_map->distance);  
         return 0;
     }
-    // If dist (src, u) + dist (u, v) = current dist(src, v), then use the latency as discarding criteria
-    if ((distance_through_u == v_map->distance) && (latency_through_u > v_map->latency)) {
-        //DEBUG_PC ("dist(src, u) + dist(u,v) = current dist(src, v), but latency (src,u) + latency (u, v) > current latency (src, v)");          
-        return 0;
-    }	
-	// If dist (src, u) + dist (u,v) == current dist(src, v) AND latency (src, u) + latency (u, v) == current latency (src, v), the available bandwidth is the criteria
-	if ((distance_through_u ==  v_map->distance) && (latency_through_u == v_map->latency) && (availBw_through_u < v_map->avaiBandwidth)) {
-		return 0;
-	}    
+	// If energy consumption optimization is requested
+	if (arg & ENERGY_EFFICIENT_ARGUMENT) {
+		if (distance_through_u == v_map->distance) {
+			if (power_through_u > v_map->power) {
+				DEBUG_PC("Energy (src -> u + u -> v: %f (Watts) >Energy (src, v): %f (Watts)--> DISCARD LINK", power_through_u, v_map->power);
+				return 0;
+			}
+			// same energy consumption, consider latency
+			if ((power_through_u == v_map->power) && (latency_through_u > v_map->latency)) {
+				return 0;
+			}
+			if ((power_through_u == v_map->power) && (latency_through_u == v_map->latency) && (availBw_through_u < v_map->avaiBandwidth)) {
+				return 0;
+			}
+		}
+	} // No optimization, rely on latency and available e2e bandwidth
+	else {
+		// If dist (src, u) + dist (u, v) = current dist(src, v), then use the latency as discarding criteria
+		if ((distance_through_u == v_map->distance) && (latency_through_u > v_map->latency)) {
+			//DEBUG_PC ("dist(src, u) + dist(u,v) = current dist(src, v), but latency (src,u) + latency (u, v) > current latency (src, v)");          
+			return 0;
+		}
+		// If dist (src, u) + dist (u,v) == current dist(src, v) AND latency (src, u) + latency (u, v) == current latency (src, v), the available bandwidth is the criteria
+		if ((distance_through_u == v_map->distance) && (latency_through_u == v_map->latency) && (availBw_through_u < v_map->avaiBandwidth)) {
+			return 0;
+		}
+	}
     DEBUG_PC ("%s --> %s Relaxed", u->node.nodeId, v->tVertice.nodeId);
-    DEBUG_PC ("\t AvailBw: %f Mb/s, Cost: %f, Latency: %f ms", availBw_through_u, distance_through_u, latency_through_u);
+    DEBUG_PC ("\t AvailBw: %f Mb/s, Cost: %f, Latency: %f ms, Energy: %f Watts", availBw_through_u, distance_through_u, latency_through_u, power_through_u);
     
     // Update Q list -- 
     struct nodeItem_t *nodeItem = g_malloc0 (sizeof (struct nodeItem_t));
@@ -1505,26 +1615,31 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
     nodeItem->distance = distance_through_u;
 	memcpy(&nodeItem->distance, &distance_through_u, sizeof(gdouble));		     
 	memcpy(&nodeItem->latency, &latency_through_u, sizeof(gdouble));
+	memcpy(&nodeItem->power, &power_through_u, sizeof(gdouble));
 	duplicate_node_id (&v->tVertice, &nodeItem->node);	
 	// add node to the Q list
-    *Q = g_list_insert_sorted (*Q, nodeItem, sort_by_distance);
-    //DEBUG_PC ("%s ADDED to Q (length: %d)", nodeItem->node.nodeId, g_list_length(*Q));    
+	if (arg & ENERGY_EFFICIENT_ARGUMENT) {
+		*Q = g_list_insert_sorted(*Q, nodeItem, sort_by_energy);
+	}
+	else
+		*Q = g_list_insert_sorted (*Q, nodeItem, sort_by_distance);
     
-    // Update the mapNodes for the specific reached tv   
+	// Update the mapNodes for the specific reached tv   
     v_map->distance = distance_through_u;
 	memcpy(&v_map->distance, &distance_through_u, sizeof(gdouble));
     memcpy (&v_map->avaiBandwidth, &availBw_through_u, sizeof (gdouble));
     memcpy (&v_map->latency, &latency_through_u, sizeof (gdouble));
+	memcpy(&v_map->power, &power_through_u, sizeof(gdouble));
     // Duplicate the predecessor edge into the mapNodes 
 	struct edges_t *e1 = &(v_map->predecessor);
 	struct edges_t *e2 = &(v->edges[indexEdge]);
-	duplicate_edge (e1, e2);	
+	duplicate_edge(e1, e2);	
 	DEBUG_PC ("u->v Edge: %s(%s) --> %s(%s)", e2->aNodeId.nodeId, e2->aEndPointId, e2->zNodeId.nodeId, e2->zEndPointId);
-	DEBUG_PC("v-pred aTopology: %s", e2->aTopologyId);
+	//DEBUG_PC("v-pred aTopology: %s", e2->aTopologyId);
 	DEBUG_PC("v-pred zTopology: %s", e2->zTopologyId);
 
     // Check whether v is dstPEId
-	//DEBUG_PC ("Targeted dstPEId: %s", req->dstPEId.nodeId);
+	//DEBUG_PC ("Targeted dstId: %s", s->service_endpoints_id[1].device_uuid);
 	//DEBUG_PC ("nodeId added to the map: %s", v_map->verticeId.nodeId);
 	//DEBUG_PC ("Q Length: %d", g_list_length(*Q));
 	g_free(path_constraints);
@@ -1547,7 +1662,6 @@ gboolean check_computed_path_feasability (struct service_t *s, struct compRouteO
 	float epsilon = 0.0000001;
 	struct path_constraints_t* pathCons = get_path_constraints(s);
 	gboolean ret = TRUE;
-
 	if (pathCons->latency == TRUE) {
 		if ((pathCons->latencyConstraint - p->delay > 0.0) || (fabs(pathCons->latencyConstraint - p->delay) < epsilon)) {
 			DEBUG_PC("Computed Path (latency: %f) is feasible wrt Connection Demand: %f", p->delay, pathCons->latencyConstraint);
@@ -1558,8 +1672,7 @@ gboolean check_computed_path_feasability (struct service_t *s, struct compRouteO
 			return FALSE;
 		}
 	}
-	// Other constraints...
-	
+	// Other constraints...		
 	g_free(pathCons);
 	return ret;
 }
@@ -1569,12 +1682,13 @@ gboolean check_computed_path_feasability (struct service_t *s, struct compRouteO
  * 	@file pathComp_tools.c
  * 	@brief Sorting the GList Q items by distance 
  * 
+ * @param a
+ * @param b
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gint sort_by_distance (gconstpointer a, gconstpointer b)
-{
+gint sort_by_distance (gconstpointer a, gconstpointer b) {
 	//DEBUG_PC ("sort by distance a and b");	
 	g_assert(a != NULL);
 	g_assert(b != NULL);
@@ -1592,13 +1706,55 @@ gint sort_by_distance (gconstpointer a, gconstpointer b)
 		return 1;
 	else if (node1->distance < node2->distance)
 		return 0;
-	if (node1->distance == node2->distance)
-	{
+	if (node1->distance == node2->distance) {
 		if (node1->latency > node2->latency)
 			return 1;
 		else if (node1->latency <= node2->latency)
 			return 0;
 	}
+	return 0;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Sorting the GList Q items by distance, energy and latency
+ * 
+ * @param a
+ * @param b
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+gint sort_by_energy(gconstpointer a, gconstpointer b) {	
+	g_assert(a != NULL);
+	g_assert(b != NULL);
+
+	//DEBUG_PC ("sort by distance a and b");	  
+	struct nodeItem_t* node1 = (struct nodeItem_t*)a;
+	struct nodeItem_t* node2 = (struct nodeItem_t*)b;
+	g_assert(node1);
+	g_assert(node2);
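+	// comparator for g_list_insert_sorted(): returns 1 to place node1 after node2, 0 otherwise (same convention as sort_by_distance)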
+	
+	//1st criteria: sorting by lowest distance
+	if (node1->distance > node2->distance)
+		return 1;
+	if (node1->distance < node2->distance)
+		return 0;
+
+	// 2nd Criteria: sorting by the lowest energy
+	if (node1->power > node2->power)
+		return 1;
+	if (node1->power < node2->power)
+		return 0;
+
+	// 3rd Criteria: by the latency 
+	if (node1->latency > node2->latency)
+		return 1;
+	if (node1->latency <= node2->latency)
+		return 0;
+	return 0;
 }
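+
+// Minimal usage sketch: both comparators follow the same 0/1 convention, so
+// either can be handed to GLib's g_list_insert_sorted to keep the Dijkstra
+// queue Q ordered; the field values below are placeholders.
+//
+//	GList* Q = NULL;
+//	struct nodeItem_t* n = g_malloc0(sizeof(struct nodeItem_t));
+//	n->distance = 10.0; n->power = 2.5; n->latency = 0.3;
+//	Q = g_list_insert_sorted(Q, n, sort_by_energy);   // energy-aware ordering
+//	Q = g_list_insert_sorted(Q, n, sort_by_distance); // distance-first ordering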
 
 ////////////////////////////////////////////////////////////////////////////////////////
@@ -1613,8 +1769,7 @@ gint sort_by_distance (gconstpointer a, gconstpointer b)
 /////////////////////////////////////////////////////////////////////////////////////////
 struct graph_t * create_graph () {
 	struct graph_t * g = g_malloc0 (sizeof (struct graph_t));
-	if (g == NULL)
-	{
+	if (g == NULL) {
 		DEBUG_PC ("Memory Allocation Problem");
 		exit (-1);
 	}
@@ -1633,8 +1788,7 @@ struct graph_t * create_graph () {
 /////////////////////////////////////////////////////////////////////////////////////////
 struct map_nodes_t * create_map_node ()	 {
 	struct map_nodes_t * mN = g_malloc0 (sizeof (struct map_nodes_t));
-	if (mN == NULL)
-	{
+	if (mN == NULL) {
 		DEBUG_PC ("Memory allocation failed");
 		exit (-1);
 	}
@@ -1652,78 +1806,18 @@ struct map_nodes_t * create_map_node ()	 {
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-
 struct service_t* get_service_for_computed_path(gchar* serviceUUID) {
-	for (gint i = 0; i < serviceList->numServiceList; i++) {
-		struct service_t* s = &(serviceList->services[i]);
-		if (strcmp(s->serviceId.service_uuid, serviceUUID) == 0)
-			return s;
+	gint i = 0;
+	for(GList *listnode = g_list_first(serviceList);
+		listnode;
+		listnode = g_list_next(listnode), i++) {
+			struct service_t* s = (struct service_t*)(listnode->data);
+			if (strcmp(s->serviceId.service_uuid, serviceUUID) == 0)
+				return s;
 	}
 	return NULL;
 }
 
-////////////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_tools.c
- * 	@brief Allocate memory for struct deviceList_t
- *
- *
- * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-struct deviceList_t* create_device_list()
-{
-	struct deviceList_t* dList = g_malloc0(sizeof(struct deviceList_t));
-	if (dList == NULL)
-	{
-		DEBUG_PC("Memory Allocation Failure");
-		exit(-1);
-	}
-	return dList;
-}
-
-////////////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_tools.c
- * 	@brief Allocate memory for struct linkList_t
- *
- *
- * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-struct linkList_t* create_link_list() {
-	struct linkList_t* lList = g_malloc0(sizeof(struct linkList_t));
-	if (lList == NULL)
-	{
-		DEBUG_PC("Memory Allocation Failure");
-		exit(-1);
-	}
-	lList->numLinks = 0;
-	return lList;
-}
-
-////////////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_tools.c
- * 	@brief Allocate memory for struct serviceList_t
- *
- *
- * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-struct serviceList_t* create_service_list() {
-	struct serviceList_t* sList = g_malloc0(sizeof(struct serviceList_t));
-	if (sList == NULL)
-	{
-		DEBUG_PC("Memory Allocation Failure");
-		exit(-1);
-	}
-	return sList;
-}
-
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
@@ -1766,8 +1860,7 @@ void print_service_type(guint type) {
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void print_link_port_direction(guint direction)
-{
+void print_link_port_direction(guint direction) {
 	switch (direction) {
 		case LINK_PORT_DIRECTION_BIDIRECTIONAL:
 			//DEBUG_PC("Bidirectional Port Direction");
@@ -1796,8 +1889,7 @@ void print_link_port_direction(guint direction)
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void print_termination_direction(guint direction)
-{
+void print_termination_direction(guint direction) {
 	switch (direction) {
 	case TERMINATION_DIRECTION_BIDIRECTIONAL:
 		//DEBUG_PC("Bidirectional Termination Direction");
@@ -1922,26 +2014,6 @@ void print_link_forwarding_direction(guint linkFwDir) {
 	return;
 }
 
-////////////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_tools.c
- * 	@brief Allocate memory for the contextSet
- *
- * @param 
- *
- * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-struct contextSet_t* create_contextSet() {
-	struct contextSet_t* c = g_malloc0(sizeof(struct contextSet_t));
-	if (c == NULL) {
-		DEBUG_PC("Memory Allocation Failure");
-		exit(-1);
-	}
-	return c;
-}
-
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
@@ -1954,18 +2026,19 @@ struct contextSet_t* create_contextSet() {
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct context_t* find_contextId_in_set(gchar* contextUuid, struct contextSet_t* set) {
-
-	g_assert(set);
-	//DEBUG_PC("Checking if contextId: %s in in the ContextList??", contextUuid);
-
-	for (gint i = 0; i < set->num_context_set; i++) { 	
-		struct context_t* c = &(set->contextList[i]);
+struct context_t* find_contextId_in_set(gchar* contextUuid, GList** set) {
+	//DEBUG_PC("Checking if contextId: %s in in the ContextSet??", contextUuid);
+	gint i = 0;
+	for (GList *ln = g_list_first(*set);
+		ln;
+		ln = g_list_next(ln)){
+		struct context_t* c = (struct context_t*)(ln->data);
 		//DEBUG_PC("Context Item [%d] Id: %s", i, c->contextId);
 		if (strcmp(contextUuid, c->contextId) == 0) {
 			//DEBUG_PC("contextId: %s is FOUND in the ContextSet_List", contextUuid);
 			return c;
 		}
+		i++;
 	}
 	//DEBUG_PC("contextId: %s NOT FOUND in the ContextSet_List", contextUuid);
 	return NULL;
@@ -1983,11 +2056,19 @@ struct context_t* find_contextId_in_set(gchar* contextUuid, struct contextSet_t*
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct context_t* add_contextId_in_set(gchar *contextUuid, struct contextSet_t *set) {
+struct context_t* add_contextId_in_set(gchar *contextUuid, GList** set) {
 
-	set->num_context_set++;
-	struct context_t* c = &(set->contextList[set->num_context_set - 1]);
+	struct context_t* c = g_malloc0(sizeof(struct context_t));
+	if (c == NULL) {
+		DEBUG_PC("Memory Allocation Failure");
+		exit(-1);
+	}
 	duplicate_string(c->contextId, contextUuid);
+	// Add the context into the context set
+	//DEBUG_PC("Adding ContextId: %s", contextUuid);
+	//DEBUG_PC(" (BEFORE ADDING) Context Set Length: %d", g_list_length(*set));
+	*set = g_list_append(*set, c);
+	//DEBUG_PC(" (AFTER ADDING) Context Set Length: %d", g_list_length(*set));
 	return c;
 }
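+
+// Note on the GList** signature: g_list_append may return a new head pointer
+// (e.g., when the list was empty), so callers pass the address of their list.
+// Minimal sketch, with an illustrative contextUuid:
+//
+//	GList* contextSet = NULL;
+//	struct context_t* c = add_contextId_in_set("ctx-uuid", &contextSet);
+//	// contextSet now holds a single element pointing at c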
 
@@ -2004,9 +2085,7 @@ struct context_t* add_contextId_in_set(gchar *contextUuid, struct contextSet_t *
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 struct vertices_t* find_vertex_in_graph_context(struct graph_t *g, gchar* deviceId) {
-
-	for (gint i = 0; i < g->numVertices; i++)
-	{
+	for (gint i = 0; i < g->numVertices; i++) {
 		struct vertices_t* v = &(g->vertices[i]);
 		if (strcmp(v->verticeId.nodeId, deviceId) == 0) {
 			return v;
@@ -2027,10 +2106,11 @@ struct vertices_t* find_vertex_in_graph_context(struct graph_t *g, gchar* device
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct vertices_t* add_vertex_in_graph(struct graph_t* g, gchar* deviceId) {
+struct vertices_t* add_vertex_in_graph(struct graph_t* g, struct device_t *d) {
 	g->numVertices++;
 	struct vertices_t* v = &(g->vertices[g->numVertices - 1]);
-	duplicate_string(v->verticeId.nodeId, deviceId);
+	duplicate_string(v->verticeId.nodeId, d->deviceId);
+	memcpy(&v->power_idle, &d->power_idle, sizeof(gdouble));
 	return v;
 }
 
@@ -2040,17 +2120,24 @@ struct vertices_t* add_vertex_in_graph(struct graph_t* g, gchar* deviceId) {
  * 	@brief Construct the graphs (vertices and edges) bound to every individual context
  *
  * @param cSet
+ * @param activeFlag
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void build_contextSet_deviceList(struct contextSet_t* cSet) {
-	// Check every device their endpoints
-	for (gint i = 0; i < deviceList->numDevices; i++) {
-		struct device_t* d = &(deviceList->devices[i]);
+void build_contextSet_deviceList(GList** cSet, gint activeFlag) {
+	// Check every device their endpoints	
+	for (GList* listnode = g_list_first(deviceList); 
+		listnode; 
+		listnode = g_list_next(listnode)) {	
+		struct device_t* d = (struct device_t*)(listnode->data);
 		//DEBUG_PC("Exploring DeviceId: %s", d->deviceId);
 
+		if ((activeFlag == 1) && (d->operational_status != 2)) {
+			// only devices with operational status ENABLED (i.e., set to 2) are considered
+			continue;
+		}
 		// Check the associated endPoints
 		for (gint j = 0; j < d->numEndPoints; j++) {
 			struct endPoint_t* eP = &(d->endPoints[j]);
@@ -2058,18 +2145,17 @@ void build_contextSet_deviceList(struct contextSet_t* cSet) {
 			struct endPointId_t* ePid = &(eP->endPointId);  //end point id
 			//DEBUG_PC("   EndPointId: %s || Type: %s", eP->endPointId.endpoint_uuid, d->deviceType);
 			//DEBUG_PC("   TopologyId: %s || ContextId: %s", eP->endPointId.topology_id.topology_uuid, eP->endPointId.topology_id.contextId);
-
 			// Add contextId in ContextSet and the deviceId (+endpoint) into the vertex set
 			struct context_t *c = find_contextId_in_set(eP->endPointId.topology_id.contextId, cSet);
 			if (c == NULL) {
-				//DEBUG_PC("   contextUuid: %s MUST BE ADDED to ContextSet", eP->endPointId.topology_id.contextId);
+				DEBUG_PC("   contextUuid: %s MUST BE ADDED to ContextSet", eP->endPointId.topology_id.contextId);
 				c = add_contextId_in_set(eP->endPointId.topology_id.contextId, cSet);
 			}
 			// Check if the deviceId and endPointUuid are already considered in the graph of the context c
 			struct vertices_t* v = find_vertex_in_graph_context(&c->g, d->deviceId);
 			if (v == NULL) {
 				//DEBUG_PC("  deviceId: %s MUST BE ADDED to the Context Graph", d->deviceId);
-				v = add_vertex_in_graph(&c->g, d->deviceId);
+				v = add_vertex_in_graph(&c->g, d);
 			}
 		}
 	}
@@ -2132,8 +2218,10 @@ struct targetNodes_t* add_targeted_vertex_in_graph_context(struct vertices_t* v,
  /////////////////////////////////////////////////////////////////////////////////////////
 struct endPoint_t* find_device_tied_endpoint(gchar* devId, gchar* endPointUuid) {
 	//DEBUG_PC("devId: %s ePId: %s", devId, endPointUuid);
-	for (gint i = 0; i < deviceList->numDevices; i++) {
-		struct device_t* d = &(deviceList->devices[i]);
+	for (GList* ln = g_list_first(deviceList);
+		ln;
+		ln = g_list_next(ln)) {
+		struct device_t* d = (struct device_t*)(ln->data);
 		if (strcmp(d->deviceId, devId) != 0) {
 			continue;
 		}
@@ -2156,51 +2244,65 @@ struct endPoint_t* find_device_tied_endpoint(gchar* devId, gchar* endPointUuid)
  *
  * @param w
  * @param l
+ * @param activeFlag
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void add_edge_in_targetedVertice_set(struct targetNodes_t* w, struct link_t* l) {
+void add_edge_in_targetedVertice_set(struct targetNodes_t* w, struct link_t* l, gint activeFlag) {
 	//DEBUG_PC("\t targetedVertex: %s", w->tVertice.nodeId);
+
+	// If the activeFlag is 1, the link is added to the edges only when the
+	// associated endPoint is ENABLED, i.e., its operational status is set to 2
+	// Get the endpoints (A and Z) of the link l (assumed P2P)
+	struct link_endpointId_t* aEndpointId = &(l->linkEndPointId[0]);
+	struct link_endpointId_t* zEndpointId = &(l->linkEndPointId[1]);
+	// Get the endPoint Information tied to the device bound to aEndPointId
+	struct endPoint_t* eP = find_device_tied_endpoint(aEndpointId->deviceId, aEndpointId->endPointId);
+	if (eP == NULL) {
+		DEBUG_PC("devId: %s endPointUuid: %s NOT in Device List!!--- Weird", aEndpointId->deviceId, aEndpointId->endPointId);
+		exit(-1);
+	}
+	// Check whether the port in that endPoint (eP) is Active upon the activeFlag being SET
+	if (activeFlag == 1) {
+		if (eP->operational_status != 2) // NOT ENABLED, then discard this link
+			return;
+	}
+
+	// Add the edge into the graph
 	w->numEdges++;
 	struct edges_t* e = &(w->edges[w->numEdges - 1]);
 	// Copy the link Id UUID
 	duplicate_string(e->linkId, l->linkId);
-
-	// copy the deviceId and endpointsIds (A --> Z)
-	struct link_endpointId_t* aEndpointId = &(l->linkEndPointId[0]);
 	duplicate_string(e->aNodeId.nodeId, aEndpointId->deviceId);
 	duplicate_string(e->aEndPointId, aEndpointId->endPointId);
-	duplicate_string(e->aTopologyId, aEndpointId->topology_id.topology_uuid);
-
-	struct link_endpointId_t* zEndpointId = &(l->linkEndPointId[1]);
+	duplicate_string(e->aTopologyId, aEndpointId->topology_id.topology_uuid);	
 	duplicate_string(e->zNodeId.nodeId, zEndpointId->deviceId);
 	duplicate_string(e->zEndPointId, zEndpointId->endPointId);
 	duplicate_string(e->zTopologyId, zEndpointId->topology_id.topology_uuid);
-
-	// The potential and available capacity is indeed retrieved using aEndpointId in the deviceList
-	struct endPoint_t* eP = find_device_tied_endpoint(aEndpointId->deviceId, aEndpointId->endPointId);
-	if (eP == NULL) {
-		DEBUG_PC("devId: %s endPointUuid: %s NOT in Device List!!--- Weird", aEndpointId->deviceId, aEndpointId->endPointId);
-		exit(-1);
-	}
+	
 	//Potential(total) and available capacity
 	e->unit = eP->potential_capacity.unit;
 	memcpy(&e->totalCap, &eP->potential_capacity.value, sizeof(gdouble));
 	memcpy(&e->availCap, &eP->available_capacity.value, sizeof(gdouble));
-
 	// Copy interdomain local/remote Ids
 	memcpy(e->interDomain_localId, eP->inter_domain_plug_in.inter_domain_plug_in_local_id, 
 		strlen(eP->inter_domain_plug_in.inter_domain_plug_in_local_id));
 	memcpy(e->interDomain_remoteId, eP->inter_domain_plug_in.inter_domain_plug_in_remote_id,
 		strlen(eP->inter_domain_plug_in.inter_domain_plug_in_remote_id));
-
 	// cost value
 	memcpy(&e->cost, &l->cost_characteristics.cost_value, sizeof(gdouble));
-
-	// latency
+	// latency ms
 	memcpy(&e->delay, &l->latency_characteristics.fixed_latency, sizeof(gdouble));
+	// energy J/bits ~ power
+	memcpy(&e->energy, &eP->energyConsumption, sizeof(gfloat));
+	
+	//DEBUG_PC("Edge - Total/Available Capacity: %f/%f; Cost: %f; Delay: %f, Energy: %f", eP->potential_capacity.value, eP->available_capacity.value,
+	//	l->cost_characteristics.cost_value, l->latency_characteristics.fixed_latency, l->energy_link);
+
+	//DEBUG_PC("Graph Edge - Total/Available Capacity: %f/%f; Cost: %f; Delay: %f, Energy: %f", e->totalCap, e->availCap,
+	//	e->cost, e->delay, e->energy);
 	return;
 }
 
@@ -2216,8 +2318,7 @@ void add_edge_in_targetedVertice_set(struct targetNodes_t* w, struct link_t* l)
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct edges_t* find_edge_in_targetedVertice_set(struct targetNodes_t* w, struct link_t* l) {
-		
+struct edges_t* find_edge_in_targetedVertice_set(struct targetNodes_t* w, struct link_t* l) {		
 	for (gint i = 0; i < w->numEdges; i++) {
 		struct edges_t* e = &(w->edges[i]);
 		if (strcmp(e->linkId, l->linkId) == 0) {
@@ -2234,32 +2335,37 @@ struct edges_t* find_edge_in_targetedVertice_set(struct targetNodes_t* w, struct
  * contents/info of the link list
  *
  * @param set
+ * @param activeFlag
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void build_contextSet_linklList(struct contextSet_t* set) {
-	g_assert(set);
-	
+void build_contextSet_linklList(GList** set, gint activeFlag) {	
 	// for each link in linkList:
 	// 1st - Retrieve endpoint A --> B features (context Id, device Id, endpoint Id)
 	// 2nd - In the graph associated to the contextId, check whether A (deviceId) is in the vertices list
 	// o No, this is weird ... exit
 	// o Yes, get the other link endpoint (i.e., B) and check whether it exists. If NOT, add it, considering
 	// all the attributes; Otherwise, check whether the link is different from existing edges between A and B
+	gdouble epsilon = 0.1;
+	gint j = 0;
+	for (GList* ln = g_list_first(linkList);
+		ln;
+		ln = g_list_next(ln)) {
+		struct link_t* l = (struct link_t*)(ln->data);
+		j++;
 
-	for (gint j = 0; j < linkList->numLinks; j++) {
-		struct link_t* l = &(linkList->links[j]);
-		// link assumed to be P2P A --> B; I.e. 2 endPoints; 1st specifies A and 2nd specifie B
+		// link assumed to be P2P A --> B; i.e. 2 endPoints; 1st specifies A and 2nd specifies B
 		struct link_endpointId_t* aEndpointId = &(l->linkEndPointId[0]);
 		struct topology_id_t* topologyId = &(aEndpointId->topology_id);
 		// get the contextId
 		gchar contextUuid[UUID_CHAR_LENGTH];
 		duplicate_string(contextUuid, topologyId->contextId);
-		//DEBUG_PC("Link: %s in Context: %s", l->linkId, contextUuid);
+		DEBUG_PC("Link: %s in ContextId: %s", l->linkId, contextUuid);
 
 		// Check first contextUuid exists in the cSet
+		//DEBUG_PC("Length of Context: %d", g_list_length(set));
 		struct context_t* c = find_contextId_in_set(contextUuid, set);
 		if (c == NULL) {
 			DEBUG_PC("ContextId: %s does NOT exist... weird", contextUuid);
@@ -2273,28 +2379,29 @@ void build_contextSet_linklList(struct contextSet_t* set) {
 		struct graph_t* g = &(c->g); // get the graph associated to the context c
 		struct vertices_t* v = find_vertex_in_graph_context(g, aDeviceId);
 		if (v == NULL) {
-			DEBUG_PC("aDeviceId: %s IS NOT IN Vertices of contextId: %s", aDeviceId, contextUuid);
+			DEBUG_PC("%s NOT a VERTEX of contextId: %s ... WEIRD", aDeviceId, contextUuid);
 			exit(-1);
 		}		
 		// get the bEndpointId
 		struct link_endpointId_t* bEndpointId = &(l->linkEndPointId[1]);
 		gchar bDeviceId[UUID_CHAR_LENGTH];
 		duplicate_string(bDeviceId, bEndpointId->deviceId);
+		DEBUG_PC("[%d] -- Link: %s [%s ==> %s]", j-1, l->linkId, aDeviceId, bDeviceId);
 		// Check whether device B is in the targeted Vertices from A (i.e., v)?
 		// If not, add B in the targeted vertices B + create the edge and add it
 		// If B exist, check whether the explored link/edge is already in the list of edges
 		struct targetNodes_t* w = find_targeted_vertex_in_graph_context(v, bDeviceId);
 		if (w == NULL) {
-			//DEBUG_PC("B device [%s] is PEER of A device [%s]", bDeviceId, v->verticeId.nodeId);
+			DEBUG_PC("[%s] is PEER of [%s]", bDeviceId, v->verticeId.nodeId);
 			w = add_targeted_vertex_in_graph_context(v, bDeviceId);
-			add_edge_in_targetedVertice_set(w, l);
+			add_edge_in_targetedVertice_set(w, l, activeFlag);
 		}
 		else {
 			// w exists, it is needed to check whether the edge (link) should be added
 			struct edges_t* e = find_edge_in_targetedVertice_set(w, l);
 			if (e == NULL) {
 				// Add the link into the list
-				add_edge_in_targetedVertice_set(w, l);
+				add_edge_in_targetedVertice_set(w, l, activeFlag);
 			}
 			else {
 				DEBUG_PC("The link already exists ...");
@@ -2316,21 +2423,47 @@ void build_contextSet_linklList(struct contextSet_t* set) {
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void build_contextSet(struct contextSet_t* cSet) {
-	g_assert(cSet);
-	g_assert(deviceList);
-	g_assert(linkList);
+void build_contextSet(GList** cSet) {
+	gint activeFlag = 0; // all the devices/links are considered, regardless of whether they are active
 
 	// devices are tied to contexts, i.e. depending on the contextId of the devices
-	build_contextSet_deviceList(cSet);
+	build_contextSet_deviceList(cSet, activeFlag);
+
+	DEBUG_PC("Length for the Context Set: %d", g_list_length(*cSet));
 
 	// Once the diverse contexts are created and the devices/endpoints assigned to the 
 	// respective graph tied to each context, it is needed to create the edges
-	build_contextSet_linklList(cSet);
+	build_contextSet_linklList(cSet, activeFlag);
 
 	return;
 }
 
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Create the set of (distinct) contexts with the deviceList and linkList with
+ * operational status active
+ *
+ * @param cSet
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void build_contextSet_active(GList** cSet) {
+	gint activeFlag = 1; // only devices/links with operational status ENABLED (i.e., set to 2) are considered
+
+	// devices are tied to contexts, i.e. depending on the contextId of the devices
+	build_contextSet_deviceList(cSet, activeFlag);
+
+	DEBUG_PC("Length for the Context Set: %d", g_list_length(*cSet));
+
+	// Once the diverse contexts are created and the devices/endpoints assigned to the 
+	// respective graph tied to each context, it is needed to create the edges
+	build_contextSet_linklList(cSet, activeFlag);
+	return;
+}
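+
+// Call-site sketch: the two entry points differ only in the activeFlag they
+// pass down; build_contextSet() considers every device/link, whereas
+// build_contextSet_active() keeps only those with operational status 2.
+//
+//	GList* contextSet = NULL;
+//	build_contextSet_active(&contextSet); // graph over ENABLED resources only
+//	print_contextSet(contextSet);
+//	g_list_free_full(g_steal_pointer(&contextSet), (GDestroyNotify)destroy_context);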
+
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
@@ -2342,11 +2475,14 @@ void build_contextSet(struct contextSet_t* cSet) {
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void print_contextSet(struct contextSet_t* set) {
-	g_assert(set);
+void print_contextSet(GList* set) {
+
+	DEBUG_PC("Printing the ContextSet w/ number of Elements: %d", g_list_length(set));
 
-	for (gint i = 0; i < set->num_context_set; i++) {
-		struct context_t* c = &(set->contextList[i]);
+	for (GList* ln = g_list_first(set);
+		ln;
+		ln = g_list_next(ln)) {
+		struct context_t* c = (struct context_t*)(ln->data);
 		DEBUG_PC("-------------------------------------------------------------");
 		DEBUG_PC(" Context Id: %s", c->contextId);
 		DEBUG_PC("-------------------------------------------------------------");
@@ -2422,8 +2558,7 @@ gint same_src_dst_pe_nodeid(struct service_t* s)
 /////////////////////////////////////////////////////////////////////////////////////////
 void comp_route_connection_issue_handler (struct compRouteOutput_t *path, struct service_t *s)
 {
-	g_assert(path);
-	g_assert(s);
+	g_assert(path); g_assert(s);
 
 	// Increase the number of computed routes/paths despite there was an issue to be reported		
 	path->numPaths++;	
@@ -2472,8 +2607,7 @@ void destroy_compRouteOutputList (struct compRouteOutputList_t *ro)
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void duplicate_graph (struct graph_t *originalGraph, struct graph_t *destGraph)	{
-	g_assert (originalGraph);
-	g_assert (destGraph);
+	g_assert (originalGraph); g_assert (destGraph);
 	
 	destGraph->numVertices = originalGraph->numVertices;
 	for (gint i = 0; i < originalGraph->numVertices; i++) {
@@ -2481,6 +2615,7 @@ void duplicate_graph (struct graph_t *originalGraph, struct graph_t *destGraph)
 		struct vertices_t *dVertex = &(destGraph->vertices[i]);
 		dVertex->numTargetedVertices = oVertex->numTargetedVertices;		
 		duplicate_node_id (&oVertex->verticeId, &dVertex->verticeId);
+		memcpy(&dVertex->power_idle, &oVertex->power_idle, sizeof(gdouble));
 		
 		for (gint j = 0; j < oVertex->numTargetedVertices; j++)	{
 			struct targetNodes_t *oTargetedVertex = &(oVertex->targetedVertices[j]);
@@ -2596,9 +2731,7 @@ struct edges_t* get_reverse_edge_from_the_graph(struct edges_t* e, struct graph_
 /////////////////////////////////////////////////////////////////////////////////////////
 void allocate_graph_resources (struct path_t *p, struct service_t *s, struct graph_t *g)
 {
-	g_assert (p);
-	g_assert (s);
-	g_assert (g);
+	g_assert (p); g_assert (s); g_assert (g);
 	// Retrieve the requested bw by the service
 	struct path_constraints_t* pathCons = get_path_constraints(s);
 
@@ -2617,8 +2750,7 @@ void allocate_graph_resources (struct path_t *p, struct service_t *s, struct gra
 		memcpy(&e->availCap, &resBw, sizeof(gdouble));
 		DEBUG_PC("Final e/link avail Bw: %f", e->availCap);	
 	}
-	g_free(pathCons);
-	
+	g_free(pathCons);	
 	return;
 }
 
@@ -2633,14 +2765,12 @@ void allocate_graph_resources (struct path_t *p, struct service_t *s, struct gra
  *	@parma g
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2021
+ *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void allocate_graph_reverse_resources(struct path_t* p, struct service_t * s, struct graph_t* g)
 {
-	g_assert(p);
-	g_assert(s);
-	g_assert(g);
+	g_assert(p); g_assert(s); g_assert(g);
 
 	struct path_constraints_t* pathCons = get_path_constraints(s);
 	for (gint i = 0; i < p->numPathLinks; i++) {
@@ -2674,20 +2804,20 @@ void allocate_graph_reverse_resources(struct path_t* p, struct service_t * s, st
  *	@param routeList
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2021
+ *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void print_path_connection_list(struct compRouteOutputList_t* routeList) {
 	g_assert(routeList);
 	for (gint i = 0; i < routeList->numCompRouteConnList; i++) {
-		DEBUG_PC("==================== Service Item: %d ===================", i);
+		DEBUG_PC("==================== Service instance: %d ===================", i);
 		struct compRouteOutput_t* rO = &(routeList->compRouteConnection[i]);
 		DEBUG_PC("num service endpoints: %d", rO->num_service_endpoints_id);
 		struct serviceId_t* s = &(rO->serviceId);
 		DEBUG_PC("ContextId: %s, ServiceId: %s", s->contextId, s->service_uuid);
-		DEBUG_PC("ingress --- %s [%s]", rO->service_endpoints_id[0].device_uuid, 
+		DEBUG_PC("ingress - %s[%s]", rO->service_endpoints_id[0].device_uuid, 
 			rO->service_endpoints_id[0].endpoint_uuid);
-		DEBUG_PC("egress --- %s [%s]", rO->service_endpoints_id[1].device_uuid,
+		DEBUG_PC("egress - %s [%s]", rO->service_endpoints_id[1].device_uuid,
 			rO->service_endpoints_id[1].endpoint_uuid);
 
 		if (rO->noPathIssue == NO_PATH_CONS_ISSUE) {
@@ -2713,7 +2843,7 @@ void print_path_connection_list(struct compRouteOutputList_t* routeList) {
  *	@param d
  *
  *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2021
+ *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void update_stats_path_comp(struct compRouteOutputList_t* routeConnList, struct timeval d, gint numSuccesPathComp, gint numPathCompIntents) {
@@ -2729,8 +2859,11 @@ void update_stats_path_comp(struct compRouteOutputList_t* routeConnList, struct
 	DEBUG_PC("Succesfully Comp: %d | Path Comp Requests: %d", numSuccesPathComp, numPathCompIntents);
 	DEBUG_PC("AV. PATH COMP ALG. TIME: %f ms", av_alg_comp_time);
 
-	for (gint i = 0; i < serviceList->numServiceList; i++) {
-		struct service_t* s = &(serviceList->services[i]);
+	gint i = 0;
+	for (GList* listnode = g_list_first(serviceList);
+		listnode;
+		listnode = g_list_next(listnode), i++) {
+		struct service_t* s = (struct service_t*)(listnode->data);
 		char* eptr;
 		for (gint j = 0; j < s->num_service_constraints; j++) {
 			struct constraint_t* constraints = &(s->constraints[j]);
@@ -2739,6 +2872,7 @@ void update_stats_path_comp(struct compRouteOutputList_t* routeConnList, struct
 			}
 		}
 	}
+
 	for (gint k = 0; k < routeConnList->numCompRouteConnList; k++) {
 		struct compRouteOutput_t* rO = &(routeConnList->compRouteConnection[k]);
 		if (rO->noPathIssue == NO_PATH_CONS_ISSUE) {
@@ -2764,5 +2898,500 @@ void update_stats_path_comp(struct compRouteOutputList_t* routeConnList, struct
 	gdouble avBlockedBwRatio = (gdouble)(1.0 - avServedRatio);
 	DEBUG_PC("AV. BBE: %f", avBlockedBwRatio);
 	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate an active service path
+ *
+ *  @param actServPath
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_active_service_path(struct activeServPath_t* actServPath) {
+	g_assert(actServPath);
+	g_free(actServPath);
+	return;
+}
 
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate active service
+ *
+ *  @param actService
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_active_service(struct activeService_t* actService) {
+	g_assert(actService);
+	g_list_free_full(g_steal_pointer(&actService->activeServPath), (GDestroyNotify)destroy_active_service_path);
+	g_free(actService);
+	return;
 }
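+
+// Cleanup sketch: destroy_active_service has the shape of a GDestroyNotify,
+// so a whole list of active services can be released in one call, mirroring
+// how activeServPath is freed above:
+//
+//	g_list_free_full(g_steal_pointer(&activeServList),
+//		(GDestroyNotify)destroy_active_service);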
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate a requested service 
+ *
+ *  @param s
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_requested_service(struct service_t* s) {
+	g_assert(s);
+	g_free(s);
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate a device
+ *
+ *  @param d
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_device(struct device_t* d) {
+	g_assert(d);
+	g_free(d);
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate a link from the linkList
+ *
+ *  @param l
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_link(struct link_t* l) {
+	g_assert(l);
+	g_free(l);
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate a context from the contextSet
+ *
+ *  @param c
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_context(struct context_t* c) {
+	g_assert(c);
+	g_free(c);
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Execution of the Dijkstra algorithm
+ *
+ *  @param srcMapIndex
+ *  @param dstMapIndex
+ *	@param g
+ *	@param s
+ *  @param mapNodes
+ *  @param SN
+ *  @param RP
+ *  @param arg
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct service_t* s, 
+	struct map_nodes_t* mapNodes, struct nodes_t* SN, struct compRouteOutputItem_t* RP,
+	guint arg) {
+	g_assert(s); g_assert(g);
+
+	// Set params into mapNodes related to the source nodes of the request
+	mapNodes->map[srcMapIndex].distance = 0.0;
+	mapNodes->map[srcMapIndex].latency = 0.0;
+	mapNodes->map[srcMapIndex].avaiBandwidth = 0.0;
+	mapNodes->map[srcMapIndex].power = 0.0;
+
+	// Initialize the set Q and S
+	GList *S = NULL, *Q = NULL;
+	gint indexVertice = -1;
+
+	//  Add the source into the Q
+	struct nodeItem_t* nodeItem = g_malloc0(sizeof(struct nodeItem_t));
+	if (nodeItem == NULL) {
+		DEBUG_PC("memory allocation failed\n");
+		exit(-1);
+	}
+	// initialize some nodeItem attributes
+	nodeItem->distance = 0.0;
+	nodeItem->latency = 0.0;
+	nodeItem->power = 0.0;
+	duplicate_node_id(&mapNodes->map[srcMapIndex].verticeId, &nodeItem->node);
+
+	// Select the optimization process
+	if (arg & ENERGY_EFFICIENT_ARGUMENT)
+		Q = g_list_insert_sorted(Q, nodeItem, sort_by_energy);
+	// more "if" according to different optimization criteria ...
+	else
+		Q = g_list_insert_sorted(Q, nodeItem, sort_by_distance);
+
+	// Check whether there is spurNode (SN) and rootPath (RP)
+	if (SN != NULL && RP != NULL) {
+		struct routeElement_t* re;
+		for (gint j = 0; j < RP->numRouteElements; j++) {
+			// Get the source and target Nodes of the routeElement within the rootPath
+			re = &RP->routeElement[j];
+			DEBUG_PC("root Link: aNodeId: %s (%s) --> zNodeiId: %s (%s)", re->aNodeId.nodeId, re->aEndPointId, re->zNodeId.nodeId, re->zEndPointId);
+
+			// if ingress of the root link (aNodeId) is the spurNode, then stops
+			if (compare_node_id(&re->aNodeId, SN) == 0) {
+				DEBUG_PC("root Link: aNodeId: %s and spurNode: %s -- stop exploring the rootPath (RP)", re->aNodeId.nodeId, SN->nodeId);
+				break;
+			}
+			// Extract from Q
+			GList* listnode = g_list_first(Q);
+			struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
+			Q = g_list_remove(Q, node);
+
+			//DEBUG_RL_RA ("Exploring node %s", node->node.nodeId);
+			indexVertice = graph_vertice_lookup(node->node.nodeId, g);
+			g_assert(indexVertice >= 0);
+
+			// Get the indexTargetedVertice
+			gint indexTVertice = -1;
+			indexTVertice = graph_targeted_vertice_lookup(indexVertice, re->zNodeId.nodeId, g);
+			gint done = check_link(node, indexVertice, indexTVertice, g, s, &S, &Q, mapNodes, arg);
+			(void)done;
+			// Add to the S list
+			S = g_list_append(S, node);
+		}
+		// Check that the rootPath exploration stopped at the spurNode; otherwise something went wrong ...
+		if (compare_node_id(&re->aNodeId, SN) != 0) {
+			//DEBUG_PC ("root Link: aNodeId: %s is NOT the spurNode: %s -- something wrong", re->aNodeId.nodeId, SN->nodeId);
+			g_list_free_full(g_steal_pointer(&S), g_free);
+			g_list_free_full(g_steal_pointer(&Q), g_free);
+			return;
+		}
+	}
+
+	while (g_list_length(Q) > 0) {
+		//Extract from Q set
+		GList* listnode = g_list_first(Q);
+		struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
+		Q = g_list_remove(Q, node);
+		DEBUG_PC("Q length: %d", g_list_length(Q));
+		DEBUG_PC("DeviceId: %s", node->node.nodeId);
+
+		// visit all the links from u within the graph
+		indexVertice = graph_vertice_lookup(node->node.nodeId, g);
+		g_assert(indexVertice >= 0);
+
+		// Check the targeted vertices from u
+		for (gint i = 0; i < g->vertices[indexVertice].numTargetedVertices; i++) {
+			gint done = check_link(node, indexVertice, i, g, s, &S, &Q, mapNodes, arg);
+			(void)done;
+		}
+		// Add node into the S Set
+		S = g_list_append(S, node);
+		//DEBUG_PC ("S length: %d", g_list_length (S));              
+	}
+	g_list_free_full(g_steal_pointer(&S), g_free);
+	g_list_free_full(g_steal_pointer(&Q), g_free);
+	return;
+}
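+
+// Invocation sketch: arg is a bitmask; ENERGY_EFFICIENT_ARGUMENT switches the
+// queue ordering to sort_by_energy, any other value falls back to
+// sort_by_distance. Passing NULL for SN/RP requests a plain (non-spur) run:
+//
+//	dijkstra(srcMapIndex, dstMapIndex, g, s, mapNodes, NULL, NULL,
+//		ENERGY_EFFICIENT_ARGUMENT);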
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief KSP computation using Dijkstra algorithm
+ *
+ *  @param pred
+ *  @param g
+ *	@param s
+ *	@param SN
+ *	@param RP
+ *	@param mapNodes
+ *	@param arg
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+gint ksp_comp(struct pred_t* pred, struct graph_t* g, struct service_t* s,
+	struct nodes_t* SN, struct compRouteOutputItem_t* RP, 
+	struct map_nodes_t* mapNodes, guint arg) {
+	g_assert(pred); g_assert(g); g_assert(s);
+
+	DEBUG_PC("Source: %s -- Destination: %s", s->service_endpoints_id[0].device_uuid, s->service_endpoints_id[1].device_uuid);
+
+	// Check the both ingress src and dst endpoints are in the graph
+	gint srcMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[0].device_uuid, mapNodes);
+	if (srcMapIndex == -1) {
+		DEBUG_PC("ingress DeviceId: %s NOT in the graph", s->service_endpoints_id[0].device_uuid);
+		return -1;
+	}
+	
+	gint dstMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
+	if (dstMapIndex == -1) {
+		DEBUG_PC("egress DeviceId: %s NOT in the graph", s->service_endpoints_id[1].device_uuid);
+		return -1;
+	}
+
+	//DEBUG_PC("srcMapIndex: %d (node: %s)", srcMapIndex, mapNodes->map[srcMapIndex].verticeId.nodeId);
+	//DEBUG_PC("dstMapIndex: %d (node: %s)", dstMapIndex, mapNodes->map[dstMapIndex].verticeId.nodeId);
+
+	// Compute the shortest path route
+	dijkstra(srcMapIndex, dstMapIndex, g, s, mapNodes, SN, RP, arg);
+
+	// Check that a feasible solution in terms of latency and bandwidth is found
+	gint map_dstIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
+	struct map_t* dest_map = &mapNodes->map[map_dstIndex];
+	if (!(dest_map->distance < INFINITY_COST)) {
+		DEBUG_PC("destination: %s NOT reachable", s->service_endpoints_id[1].device_uuid);
+		return -1;
+	}
+
+	DEBUG_PC("AvailBw @ %s is %f", dest_map->verticeId.nodeId, dest_map->avaiBandwidth);
+	// Check that the computed available bandwidth is larger than 0.0
+	if (dest_map->avaiBandwidth <= (gfloat)0.0) {
+		DEBUG_PC("dst: %s NOT REACHABLE", s->service_endpoints_id[1].device_uuid);
+		return -1;
+	}
+	DEBUG_PC("dst: %s REACHABLE", s->service_endpoints_id[1].device_uuid);
+	// Handle predecessors
+	build_predecessors(pred, s, mapNodes);
+	return 1;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief set the path parameters (e.g., latency, cost, power, ...) to an under-constructed
+ * path from the computed map vertex
+ *
+ *  @param p
+ *  @param mapV
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void set_path_attributes(struct compRouteOutputItem_t* p, struct map_t* mapV) {
+	g_assert(p); g_assert(mapV);
+	memcpy(&p->cost, &mapV->distance, sizeof(gdouble));
+	memcpy(&p->availCap, &mapV->avaiBandwidth, sizeof(mapV->avaiBandwidth));
+	memcpy(&p->delay, &mapV->latency, sizeof(mapV->latency));
+	memcpy(&p->power, &mapV->power, sizeof(gdouble));
+	return;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief K-CSPF algorithm execution (YEN algorithm)
+ *
+ *  @param s
+ *  @param path
+ *  @param g
+ *  @param arg (optimization flag)
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_t* g, guint arg) {
+	g_assert(s); g_assert(path); g_assert(g);
+
+	// create map of devices/nodes to handle the path computation using the context
+	struct map_nodes_t* mapNodes = create_map_node();
+	build_map_node(mapNodes, g);
+
+	// predecessors to store the computed path    
+	struct pred_t* predecessors = create_predecessors();
+	struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[0]);
+	struct service_endpoints_id_t* eEp = &(s->service_endpoints_id[1]);
+
+	// Compute the 1st KSP path
+	gint done = ksp_comp(predecessors, g, s, NULL, NULL, mapNodes, arg);
+	if (done == -1) {
+		DEBUG_PC("NO PATH for %s[%s] --> %s[%s]", iEp->device_uuid, iEp->endpoint_uuid, eEp->device_uuid, eEp->endpoint_uuid);
+		comp_route_connection_issue_handler(path, s);
+		g_free(mapNodes); g_free(predecessors);
+		return;
+	}
+
+	// Construct the path from the computed predecessors
+	struct compRouteOutputItem_t* p = create_path_item();
+	//print_predecessors(predecessors);
+	build_path(p, predecessors, s);
+	gint indexDest = get_map_index_by_nodeId(eEp->device_uuid, mapNodes);
+	struct map_t* dst_map = &mapNodes->map[indexDest];
+	// Get the delay and cost
+	set_path_attributes(p, dst_map);		
+
+	// Add the computed path, it may be a not feasible path, but at the end it is
+	// checked all the feasible paths, and select the first one
+	print_path(p);
+	
+	// Copy the serviceId
+	copy_service_id(&path->serviceId, &s->serviceId);
+	// copy the service endpoints, in general, there will be 2 (point-to-point network connectivity services)
+	for (gint i = 0; i < s->num_service_endpoints_id; i++) {
+		struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[i]);
+		struct service_endpoints_id_t* oEp = &(path->service_endpoints_id[i]);
+		copy_service_endpoint_id(oEp, iEp);
+	}
+	path->num_service_endpoints_id = s->num_service_endpoints_id;
+
+	DEBUG_PC("COMPUTE UP TO K Feasible Paths A[%d]", MAX_KSP_VALUE);	
+	// Create A and B sets of paths to handle the YEN algorithm
+	struct path_set_t *A = create_path_set(), *B = create_path_set();
+	// Add 1st Computed path into A->paths[0]	
+	duplicate_path(p, &A->paths[0]);
+	A->numPaths++;
+	g_free(predecessors); g_free(p);
+	for (gint k = 1; k < MAX_KSP_VALUE; k++) {
+		DEBUG_PC("*************************** kth (%d) ***********************************", k);
+		struct compRouteOutputItem_t* p = create_path_item();
+		duplicate_path(&A->paths[k - 1], p);
+		// The spurNode ranges from near-end node of the first link to the near-end of the last link forming the kth path
+		gint i = 0;
+		struct compRouteOutputItem_t* rootPath = create_path_item();
+		for (i = 0; i < p->numRouteElements; i++) {
+			struct nodes_t *spurNode = create_node(), *nextSpurNode = create_node();
+			struct routeElement_t* re = &(p->routeElement[i]);
+			// Create predecessors to store the computed path
+			struct pred_t* predecessors = create_predecessors();
+			// Clear previous mapNodes, i.e. create it again
+			g_free(mapNodes);
+			mapNodes = create_map_node();
+			build_map_node(mapNodes, g);
+			struct nodes_t* n = &re->aNodeId;
+			duplicate_node_id(n, spurNode);
+			n = &re->zNodeId;
+			duplicate_node_id(n, nextSpurNode);
+			DEBUG_PC("spurNode: %s --> nextSpurNode: %s", spurNode->nodeId, nextSpurNode->nodeId);
+
+			// rootPath contains a set of links of A[k-1] from the source Node till the SpurNode -> NextSpurNode
+			// Example: A[k-1] = {L1, L2, L3, L4}, i.e. " Node_a -- L1 --> Node_b -- L2 --> Node_c -- L3 --> Node_d -- L4 --> Node_e "
+			// E.g., for the ith iteration if the spurNode = Node_c and NextSpurNode = Node_d; then rootPath = {L1, L2, L3}			
+			add_routeElement_path_back(re, rootPath);
+			DEBUG_PC("\n");
+			DEBUG_PC("^^^^^^^rootPath^^^^^^^");
+			print_path(rootPath);
+
+			// For all existing and computed paths p in A check if from the source to the NextSpurNode
+			// the set of links matches with those contained in the rootPath
+			// If YES, remove from the auxiliary graph the next link in p from NextSpurNode
+			// Otherwise do nothing 
+			struct graph_t* gAux = create_graph();
+			duplicate_graph(g, gAux);
+			// Modified graph
+			modify_targeted_graph(gAux, A, rootPath, spurNode);
+
+			// Trigger the computation of the path from src to dst constrained to traverse all the links from src 
+			// to spurNode contained into rootPath over the resulting graph			
+			if (ksp_comp(predecessors, gAux, s, spurNode, rootPath, mapNodes, arg) == -1) {
+				DEBUG_PC("FAILED SP from %s via spurNode: %s to %s", iEp->device_uuid, spurNode->nodeId, eEp->device_uuid);
+				g_free(nextSpurNode); g_free(spurNode);
+				g_free(gAux); g_free(predecessors);
+				continue;
+			}
+			DEBUG_PC("SUCCESFUL SP from %s via spurNode: %s to %s", iEp->device_uuid, spurNode->nodeId, eEp->device_uuid);
+			// Create the node list from the predecessors
+			struct compRouteOutputItem_t* newKpath = create_path_item();
+			build_path(newKpath, predecessors, s);
+			DEBUG_PC("new K (for k: %d) Path is built", k);
+			gint indexDest = get_map_index_by_nodeId(eEp->device_uuid, mapNodes);
+			struct map_t* dst_map = &mapNodes->map[indexDest];
+			set_path_attributes(newKpath, dst_map);
+			DEBUG_PC("New PATH (@ kth: %d) ADDED to B[%d] - {Path Cost: %f, e2e latency: %f, bw: %f, Power: %f ", k, B->numPaths, newKpath->cost, 
+													newKpath->delay, newKpath->availCap, newKpath->power);
+			// Add the computed kth SP to the heap B
+			duplicate_path(newKpath, &B->paths[B->numPaths]);
+			B->numPaths++;
+			DEBUG_PC("Number of B paths: %d", B->numPaths);
+
+			g_free(newKpath); g_free(nextSpurNode); g_free(spurNode);
+			g_free(gAux); g_free(predecessors);
+		}
+		// If B is empty then stops
+		if (B->numPaths == 0) {
+			DEBUG_PC("B does not have any path ... the stops kth computation");
+			break;
+		}
+
+		// Sort the potential B paths according to different optimization parameters
+		sort_path_set(B, arg);
+		// Add the lowest path into A[k]		
+		DEBUG_PC("-------------------------------------------------------------");
+		DEBUG_PC("Append SP for B[0] to A[%d] --- Cost: %f, Latency: %f, Power: %f", A->numPaths, B->paths[0].cost, 
+																				B->paths[0].delay, B->paths[0].power);
+		duplicate_path(&B->paths[0], &A->paths[A->numPaths]);
+		A->numPaths++;
+		DEBUG_PC("A Set size: %d", A->numPaths);
+		DEBUG_PC("-------------------------------------------------------------");
+
+		// Remove/Pop front element from the path set B (i.e. remove B[0])
+		pop_front_path_set(B);
+		DEBUG_PC("B Set Size: %d", B->numPaths);
+	}
+
+	// Copy the serviceId
+	copy_service_id(&path->serviceId, &s->serviceId);
+	// copy the service endpoints, in general, there will be 2 (point-to-point network connectivity services)
+	for (gint m = 0; m < s->num_service_endpoints_id; m++) {
+		struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[m]);
+		struct service_endpoints_id_t* oEp = &(path->service_endpoints_id[m]);
+		copy_service_endpoint_id(oEp, iEp);
+	}
+
+	// Print all the paths in A
+	for (gint h = 0; h < A->numPaths; h++) {
+		DEBUG_PC("================== A[%d] =======================", h);
+		print_path(&A->paths[h]);
+	}
+	DEBUG_PC("Number of paths: %d", path->numPaths);
+	// For all the computed paths in A, pick the one being feasible wrt the service constraints
+	for (gint ksp = 0; ksp < A->numPaths; ksp++) {
+		if (ksp >= MAX_KSP_VALUE) {
+			DEBUG_PC("Number Requested paths (%d) REACHED - STOP", ksp);
+			break;
+		}
+		gboolean feasibleRoute = check_computed_path_feasability(s, &A->paths[ksp]);
+		if (feasibleRoute == TRUE) {
+			DEBUG_PC("A[%d] available: %f, pathCost: %f; latency: %f, Power: %f", ksp, A->paths[ksp].availCap, A->paths[ksp].cost, A->paths[ksp].delay, A->paths[ksp].power);
+			struct compRouteOutputItem_t* pathaux = &A->paths[ksp];
+			path->numPaths++;
+			struct path_t* targetedPath = &path->paths[path->numPaths - 1];
+			duplicate_path_t(pathaux, targetedPath);
+			print_path_t(targetedPath);
+			remove_path_set(A);
+			remove_path_set(B);
+			return;
+		}
+	}
+	remove_path_set(A);
+	remove_path_set(B);
+	// No paths found --> Issue	
+	DEBUG_PC("K-SP failed!!!");
+	comp_route_connection_issue_handler(path, s);
+	return;
+}
\ No newline at end of file
diff --git a/src/pathcomp/backend/pathComp_tools.h b/src/pathcomp/backend/pathComp_tools.h
index b6bcea04c8aa01b6cf730460e0075327f872f344..cac66f81c561502a6d93249f5e44a6195cb0f61b 100644
--- a/src/pathcomp/backend/pathComp_tools.h
+++ b/src/pathcomp/backend/pathComp_tools.h
@@ -23,12 +23,17 @@
 #include <uuid/uuid.h>
 
 // External variables
-extern struct map_nodes_t* mapNodes;
-extern struct graph_t* graph;
-extern struct contextSet_t* contextSet;
-extern struct linkList_t* linkList;
-extern struct deviceList_t* deviceList;
-extern struct serviceList_t* serviceList;
+extern GList* contextSet;
+extern GList* linkList;
+extern GList* deviceList;
+extern GList* serviceList;
+extern GList* activeServList;
+
+//////////////////////////////////////////////////////////
+// Optimization computation argument 
+//////////////////////////////////////////////////////////
+#define NO_OPTIMIZATION_ARGUMENT		0x00000000
+#define ENERGY_EFFICIENT_ARGUMENT		0x00000001
 
 #define INFINITY_COST                   0xFFFFFFFF
 #define MAX_NUM_PRED					100
@@ -54,8 +59,9 @@ struct nodes_t {
 
 struct nodeItem_t {
     struct nodes_t node;
-    gdouble distance;
-	gdouble latency;
+    gdouble distance; // traversed distance
+	gdouble latency; // incurred latency
+	gdouble power; // consumed power
 };
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -80,7 +86,8 @@ struct edges_t {
 	gdouble totalCap, availCap;
 	
 	gdouble cost;	
-	gdouble delay;	
+	gdouble delay;
+	gdouble energy;
 
 	// inter-domain local and remote Ids
 	gchar interDomain_localId[MAX_INTER_DOMAIN_PLUG_IN_SIZE];
@@ -107,7 +114,8 @@ struct map_t {
 	struct edges_t predecessor;
 	gdouble distance;
 	gdouble avaiBandwidth;
-	gdouble latency;	
+	gdouble latency;
+	gdouble power;
 };
 
 #define MAX_MAP_NODE_SIZE				100
@@ -117,7 +125,7 @@ struct map_nodes_t {
 };
 
 #define MAX_NUM_VERTICES				20 // 100 # LGR: reduced from 100 to 20 to divide by 5 the memory used
-#define MAX_NUM_EDGES					40 // 100 # LGR: reduced from 100 to 40 to divide by 2.5 the memory used
+#define MAX_NUM_EDGES					5 // 100 # LGR: reduced from 100 to 5 to divide by 20 the memory used
 // Structures for the graph composition
 struct targetNodes_t {
 	// remote / targeted node
@@ -131,6 +139,7 @@ struct vertices_t {
 	struct targetNodes_t targetedVertices[MAX_NUM_VERTICES];
 	gint numTargetedVertices;
     struct nodes_t verticeId;
+	gdouble power_idle; // idle power of the device (due to the fans, etc.)
 };
 
 struct graph_t {
@@ -147,15 +156,6 @@ struct context_t {
 	struct graph_t g;
 };
 
-////////////////////////////////////////////////////
-// Structure for the Set of Contexts
-///////////////////////////////////////////////////
-#define MAX_NUMBER_CONTEXT		1 // 100 # LGR: reduced from 100 to 1 to divide by 100 the memory used
-struct contextSet_t {
-	struct context_t contextList[MAX_NUMBER_CONTEXT];
-	gint num_context_set;
-};
-
 #define MAX_ALG_ID_LENGTH		10
 ////////////////////////////////////////////////////
 // External Variables
@@ -241,33 +241,25 @@ struct endPoint_t {
 	struct capacity_t available_capacity;
 	// inter-domain identifiers
 	struct inter_domain_plug_in_t inter_domain_plug_in;
+	gfloat energyConsumption; // in nJ/bit
+	gint operational_status; // 0 Undefined, 1 Disabled, 2 Enabled
 };
 
 ///////////////////////////////////////////////////////////////////
 // Structure for the device contents
 ///////////////////////////////////////////////////////////////////
 #define MAX_DEV_TYPE_SIZE				128
-#define MAX_DEV_ENDPOINT_LENGTH			40	// 10 # LGR: controllers might have large number of endpoints
+#define MAX_DEV_ENDPOINT_LENGTH			50	// 10 # LGR: controllers might have large number of endpoints
 struct device_t {
+	gdouble power_idle; // idle (baseline) power of the switch, in Watts
+	gint operational_status; // 0 - Undefined, 1 - Disabled, 2 - Enabled
 	gchar deviceId[UUID_CHAR_LENGTH]; // device ID using UUID (128 bits)
-
 	gchar deviceType[MAX_DEV_TYPE_SIZE]; // Specifies the device type
-
 	// define the endpoints attached to the device
 	gint numEndPoints;
 	struct endPoint_t endPoints[MAX_DEV_ENDPOINT_LENGTH];
 };
 
-///////////////////////////////////////////////////////////////////
-// Structure for the device List
-///////////////////////////////////////////////////////////////////
-#define MAX_NUM_DEVICE		200
-struct deviceList_t {
-	// device information
-	gint numDevices;
-	struct device_t devices[MAX_NUM_DEVICE];
-};
-
 ///////////////////////////////////////////////////////////////////
 // Structure for the link EndPoint Id
 ///////////////////////////////////////////////////////////////////
@@ -294,6 +286,13 @@ struct latency_characteristics_t {
 	gdouble fixed_latency;
 };
 
+///////////////////////////////////////////////////////////////////
+// Structure for the power characteristics of the link
+///////////////////////////////////////////////////////////////////
+struct power_characteristics_t {
+	gdouble power;
+};
+
 ///////////////////////////////////////////////////////////////////
 // Structure for the link 
 ///////////////////////////////////////////////////////////////////
@@ -304,10 +303,10 @@ struct latency_characteristics_t {
 #define LINK_FORWARDING_DIRECTION_UNKNOWN						2
 struct link_t {
 	gchar linkId[UUID_CHAR_LENGTH]; // link Id using UUID (128 bits)
-
+	//gdouble energy_link; // in nJ/bit
+	//gint operational_status; // 0 Undefined, 1 Disabled, 2 Enabled
 	gint numLinkEndPointIds;
 	struct link_endpointId_t linkEndPointId[MAX_NUM_LINK_ENDPOINT_IDS];
-
 	guint forwarding_direction;
 	struct capacity_t potential_capacity;
 	struct capacity_t available_capacity;
@@ -315,15 +314,6 @@ struct link_t {
 	struct latency_characteristics_t latency_characteristics;
 };
 
-///////////////////////////////////////////////////////////////////
-// Structure for the link List
-///////////////////////////////////////////////////////////////////
-#define MAX_NUM_LIST							2000
-struct linkList_t {
-	gint numLinks;
-	struct link_t links[MAX_NUM_LIST];
-};
-
 ////////////////////////////////////////////////////
 // Structure for service Identifier
 ///////////////////////////////////////////////////
@@ -365,12 +355,10 @@ struct constraint_t {
 struct service_t {
 	// Indentifier used to determine the used Algorithm Id, e.g., KSP
 	gchar algId[MAX_ALG_ID_LENGTH];
-
 	// PATHS expected for the output
 	guint kPaths;
 	
 	struct serviceId_t serviceId;
-
 	guint service_type;	 // unknown, l2nm, l3nm, tapi
 
 	// endpoints of the network connectivity service, assumed p2p
@@ -403,10 +391,27 @@ struct path_constraints_t {
 ////////////////////////////////////////////////////
 // Structure for the handling the service requests
 ///////////////////////////////////////////////////
-#define MAX_SERVICE_LIST						100
-struct serviceList_t {
-	struct service_t services[MAX_SERVICE_LIST];
-	gint numServiceList;	
+//#define MAX_SERVICE_LIST						100
+//struct serviceList_t {
+//	struct service_t services[MAX_SERVICE_LIST];
+//	gint numServiceList;	
+//};
+
+////////////////////////////////////////////////////
+// Structure for the handling the active services 
+///////////////////////////////////////////////////
+struct activeServPath_t {
+	struct topology_id_t topology_id;
+	gchar deviceId[UUID_CHAR_LENGTH];
+	gchar endPointId[UUID_CHAR_LENGTH];
+};
+
+struct activeService_t {
+	struct serviceId_t serviceId;
+	guint service_type;	 // unknown, l2nm, l3nm, tapi
+	struct service_endpoints_id_t service_endpoints_id[MAX_NUM_SERVICE_ENPOINTS_ID];
+	guint num_service_endpoints_id;
+	GList* activeServPath;
 };
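+
+// Population sketch: an activeService_t keeps its traversed hops as a GList
+// of activeServPath_t elements, one appended per path link ("dev-uuid" is a
+// placeholder):
+//
+//	struct activeService_t* aS = g_malloc0(sizeof(struct activeService_t));
+//	struct activeServPath_t* hop = g_malloc0(sizeof(struct activeServPath_t));
+//	duplicate_string(hop->deviceId, "dev-uuid");
+//	aS->activeServPath = g_list_append(aS->activeServPath, hop);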
 
 ////////////////////////////////////////////////////////////////////////////////////////////
@@ -425,7 +430,6 @@ struct pathLink_t {
 	gchar zEndPointId[UUID_CHAR_LENGTH];
 
 	struct topology_id_t topologyId;
-
 	struct linkTopology_t linkTopologies[2]; // a p2p link (at most) can connect to devices (endpoints) attached to 2 different topologies
 	gint numLinkTopologies;
 };
@@ -454,13 +458,13 @@ struct routeElement_t {
 	gchar contextId[UUID_CHAR_LENGTH];
 };
 
-struct compRouteOutputItem_t {
-	// Potential(total) and available capacity
+struct compRouteOutputItem_t {	
 	gint unit;
 	gdouble totalCap, availCap;
 
 	gdouble cost;
 	gdouble delay;
+	gdouble power;
 
 	struct routeElement_t routeElement[MAX_ROUTE_ELEMENTS];
 	gint numRouteElements;
@@ -477,6 +481,7 @@ struct path_t {
 	struct capacity_t path_capacity;
 	struct latency_characteristics_t path_latency;
 	struct cost_characteristics_t path_cost;
+	struct power_characteristics_t path_power;
 
 	struct pathLink_t pathLinks[MAX_NUM_PATH_LINKS];
 	guint numPathLinks;
@@ -484,18 +489,14 @@ struct path_t {
 
 #define NO_PATH_CONS_ISSUE								1	 // No path due to a constraint issue
 #define MAX_NUM_COMPUTED_PATHS							10
-struct compRouteOutput_t
-{
+struct compRouteOutput_t {
 	// object describing the service identifier: serviceId and contextId
 	struct serviceId_t serviceId;
-
 	// array describing the service endpoints ids
 	struct service_endpoints_id_t service_endpoints_id[MAX_NUM_SERVICE_ENPOINTS_ID];
 	guint num_service_endpoints_id;
-
 	struct path_t paths[MAX_NUM_COMPUTED_PATHS];
-	gint numPaths;
-	
+	gint numPaths;	
 	// if the transport connectivity service cannot be computed, this value is set to NO_PATH_CONS_ISSUE, indicating that the constraints were not fulfilled
 	gint noPathIssue;
 };
@@ -504,8 +505,7 @@ struct compRouteOutput_t
 // Structure to handle the response list with all the computed network connectivity services
 ////////////////////////////////////////////////////////////////////////////////////////////
 #define MAX_COMP_CONN_LIST		100
-struct compRouteOutputList_t
-{
+struct compRouteOutputList_t {
 	struct compRouteOutput_t compRouteConnection[MAX_COMP_CONN_LIST];
 	gint numCompRouteConnList;
 
@@ -526,6 +526,7 @@ struct compRouteOutputList_t
 // Prototype of external declaration of functions
 void print_path (struct compRouteOutputItem_t *);
 void print_path_t(struct path_t*);
+struct path_t* create_path();
 
 void duplicate_string(gchar *, gchar *);
 
@@ -533,7 +534,7 @@ gchar* get_uuid_char(uuid_t);
 void copy_service_id(struct serviceId_t*, struct serviceId_t *);
 void copy_service_endpoint_id(struct service_endpoints_id_t *, struct service_endpoints_id_t *);
 
-struct graph_t* get_graph_by_contextId(struct contextSet_t *, gchar *);
+struct graph_t* get_graph_by_contextId(GList*, gchar *);
 
 struct pred_t * create_predecessors ();
 struct edges_t* create_edge();
@@ -561,31 +562,29 @@ gint graph_targeted_vertice_add (gint, gchar *, struct graph_t *);
 void remove_edge_from_graph (struct graph_t *, struct edges_t *);
 
 struct path_set_t * create_path_set ();
-void sort_path_set (struct path_set_t *);
+void sort_path_set (struct path_set_t *, guint);
 void pop_front_path_set (struct path_set_t *);
 void remove_path_set(struct path_set_t*);
 
 void build_map_node(struct map_nodes_t *, struct graph_t *);
 struct compRouteOutputList_t * create_route_list();
+void duplicate_route_list(struct compRouteOutputList_t *, struct compRouteOutputList_t *);
 struct compRouteOutputItem_t * create_path_item (); 
 void add_routeElement_path_back (struct routeElement_t *, struct compRouteOutputItem_t *);
 gboolean matching_path_rootPath (struct compRouteOutputItem_t *, struct compRouteOutputItem_t *, struct nodes_t *, struct edges_t *);
 void modify_targeted_graph (struct graph_t *, struct path_set_t *, struct compRouteOutputItem_t *, struct nodes_t *);
 gint find_nodeId (gconstpointer, gconstpointer);
-gint check_link (struct nodeItem_t *, gint, gint, struct graph_t *, struct service_t *, GList **, GList **, struct map_nodes_t *);
+gint check_link (struct nodeItem_t *, gint, gint, struct graph_t *, struct service_t *, GList **, GList **, struct map_nodes_t *, guint arg);
 gboolean check_computed_path_feasability (struct service_t *, struct compRouteOutputItem_t * );
 
 gint sort_by_distance (gconstpointer, gconstpointer);
+gint sort_by_energy(gconstpointer, gconstpointer);
 
 struct graph_t * create_graph ();
 struct map_nodes_t * create_map_node ();
 
 struct service_t * get_service_for_computed_path(gchar *);
 
-struct deviceList_t* create_device_list();
-struct linkList_t* create_link_list();
-struct serviceList_t* create_service_list();
-
 void print_service_type(guint);
 void print_link_port_direction(guint);
 void print_termination_direction(guint);
@@ -593,9 +592,9 @@ void print_termination_state(guint);
 void print_capacity_unit(guint);
 void print_link_forwarding_direction(guint);
 
-struct contextSet_t* create_contextSet();
-void build_contextSet(struct contextSet_t *);
-void print_contextSet(struct contextSet_t *);
+void build_contextSet(GList **);
+void build_contextSet_active(GList **);
+void print_contextSet(GList *);
 
 gint same_src_dst_pe_nodeid (struct service_t *);
 void comp_route_connection_issue_handler (struct compRouteOutput_t *, struct service_t *);
@@ -610,4 +609,12 @@ struct timeval tv_adjust(struct timeval);
 
 void print_path_connection_list(struct compRouteOutputList_t*);
 void update_stats_path_comp(struct compRouteOutputList_t*, struct timeval, gint, gint);
+void destroy_active_service(struct activeService_t*);
+void destroy_requested_service(struct service_t*);
+void destroy_device(struct device_t*);
+void destroy_link(struct link_t*);
+void destroy_context(struct context_t*);
+void dijkstra(gint, gint, struct graph_t*, struct service_t*, struct map_nodes_t*, struct nodes_t*, struct compRouteOutputItem_t*, guint);
+void set_path_attributes(struct compRouteOutputItem_t*, struct map_t*);
+void alg_comp(struct service_t*, struct compRouteOutput_t*, struct graph_t*, guint);
 #endif
\ No newline at end of file
diff --git a/src/pathcomp/frontend/Config.py b/src/pathcomp/frontend/Config.py
index f17a9f5377b5abcbd9001d1d3773e26998cb3211..714eb7278074ac860caa76dc3ed8b4a40ae9f192 100644
--- a/src/pathcomp/frontend/Config.py
+++ b/src/pathcomp/frontend/Config.py
@@ -26,8 +26,9 @@ PATHCOMP_BACKEND_BASEURL = str(os.environ.get('PATHCOMP_BACKEND_BASEURL', DEFAUL
 # - first check env vars PATHCOMP_BACKEND_HOST & PATHCOMP_BACKEND_PORT
 # - if not set, check env vars PATHCOMPSERVICE_SERVICE_HOST & PATHCOMPSERVICE_SERVICE_PORT_HTTP
 # - if not set, use DEFAULT_PATHCOMP_BACKEND_HOST & DEFAULT_PATHCOMP_BACKEND_PORT
+
 backend_host = DEFAULT_PATHCOMP_BACKEND_HOST
-backend_host = os.environ.get('PATHCOMPSERVICE_SERVICE_HOST', backend_host)
+#backend_host = os.environ.get('PATHCOMPSERVICE_SERVICE_HOST', backend_host)
 PATHCOMP_BACKEND_HOST = str(os.environ.get('PATHCOMP_BACKEND_HOST', backend_host))
 
 backend_port = DEFAULT_PATHCOMP_BACKEND_PORT
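The host/port lookup order described in the comments above reduces to chained os.environ.get calls. A minimal, self-contained Python sketch of that precedence (the default values here are placeholders, not the module's real defaults):

    import os

    # Placeholder defaults, for illustration only.
    DEFAULT_PATHCOMP_BACKEND_HOST = '127.0.0.1'
    DEFAULT_PATHCOMP_BACKEND_PORT = 8081

    def resolve_backend_host() -> str:
        # With the Kubernetes-injected fallback commented out (as in the change
        # above), host resolution collapses to: env var, else hard-coded default.
        return str(os.environ.get('PATHCOMP_BACKEND_HOST', DEFAULT_PATHCOMP_BACKEND_HOST))

    def resolve_backend_port() -> int:
        # The port keeps its two-level fallback: explicit var, then the
        # Kubernetes service var, then the default.
        port = os.environ.get('PATHCOMPSERVICE_SERVICE_PORT_HTTP', DEFAULT_PATHCOMP_BACKEND_PORT)
        return int(os.environ.get('PATHCOMP_BACKEND_PORT', port))
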
diff --git a/src/pathcomp/frontend/Dockerfile b/src/pathcomp/frontend/Dockerfile
index 352de75f31366b65e62e2f6357d1bd5f28bd2b0f..9384b3e19edd5e82b0efcb9706c41105a31321e3 100644
--- a/src/pathcomp/frontend/Dockerfile
+++ b/src/pathcomp/frontend/Dockerfile
@@ -62,8 +62,14 @@ RUN python3 -m pip install -r requirements.txt
 
 # Add component files into working directory
 WORKDIR /var/teraflow
-COPY src/context/. context/
-COPY src/device/. device/
+COPY src/context/__init__.py context/__init__.py
+COPY src/context/client/. context/client/
+COPY src/device/__init__.py device/__init__.py
+COPY src/device/client/. device/client/
+COPY src/service/__init__.py service/__init__.py
+COPY src/service/client/. service/client/
+COPY src/slice/__init__.py slice/__init__.py
+COPY src/slice/client/. slice/client/
 COPY src/pathcomp/. pathcomp/
 
 # Start the service
diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
index 6fc33dbd45a92405fb2fa115e12cb460a9111d54..52f1cd3d584e14ca5dee1bc5e0511e014bdc8e73 100644
--- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
+++ b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
@@ -13,9 +13,9 @@
 # limitations under the License.
 
 import grpc, logging, threading
-from common.Constants import DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
-from common.proto.context_pb2 import ContextId, Empty
+from common.proto.context_pb2 import ContextId, Empty, TopologyId
 from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
 from common.proto.pathcomp_pb2_grpc import PathCompServiceServicer
 from common.tools.context_queries.Device import get_devices_in_topology
@@ -23,6 +23,7 @@ from common.tools.context_queries.Link import get_links_in_topology
 from common.tools.context_queries.InterDomain import is_inter_domain
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from pathcomp.frontend.service.algorithms.Factory import get_algorithm
 
@@ -30,7 +31,7 @@ LOGGER = logging.getLogger(__name__)
 
 METRICS_POOL = MetricsPool('PathComp', 'RPC')
 
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+#ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 class PathCompServiceServicerImpl(PathCompServiceServicer):
     def __init__(self) -> None:
@@ -44,18 +45,23 @@ class PathCompServiceServicerImpl(PathCompServiceServicer):
 
         context_client = ContextClient()
 
+        context_id = json_context_id(DEFAULT_CONTEXT_NAME)
         if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids):
-            devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
-            links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
+            #devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
+            #links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
+            topology_id = json_topology_id(INTERDOMAIN_TOPOLOGY_NAME, context_id)
         else:
             # TODO: improve filtering of devices and links
             # TODO: add contexts, topologies, and membership of devices/links in topologies
-            devices = context_client.ListDevices(Empty())
-            links = context_client.ListLinks(Empty())
+            #devices = context_client.ListDevices(Empty())
+            #links = context_client.ListLinks(Empty())
+            topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id)
+
+        topology_details = context_client.GetTopologyDetails(TopologyId(**topology_id))
 
         algorithm = get_algorithm(request)
-        algorithm.add_devices(devices)
-        algorithm.add_links(links)
+        algorithm.add_devices(topology_details.devices)
+        algorithm.add_links(topology_details.links)
         algorithm.add_service_requests(request)
 
         #LOGGER.debug('device_list = {:s}'  .format(str(algorithm.device_list  )))
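For reference, the id-composition flow the servicer now follows, sketched with plain dicts; the shapes mirror the object_factory helpers imported above but are reproduced here as an assumption, for illustration only:

    def json_context_id(context_uuid):
        # assumed shape of common.tools.object_factory.Context.json_context_id
        return {'context_uuid': {'uuid': context_uuid}}

    def json_topology_id(topology_uuid, context_id):
        # assumed shape of common.tools.object_factory.Topology.json_topology_id
        return {'context_id': context_id, 'topology_uuid': {'uuid': topology_uuid}}

    context_id  = json_context_id('admin')               # DEFAULT_CONTEXT_NAME
    topology_id = json_topology_id('admin', context_id)  # DEFAULT_TOPOLOGY_NAME
    # TopologyId(**topology_id) feeds the single GetTopologyDetails() call that
    # replaces the previous ListDevices()/ListLinks() pair.
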
diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
index a6d39ee36949e075323613fceb71da5c77354fe5..144246620e85dd1aaf507efe75e22b62ce942587 100644
--- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
@@ -14,12 +14,10 @@
 
 import operator
 from typing import Dict, List, Optional, Set, Tuple
-from common.proto.context_pb2 import Connection, Link, Service
-from common.proto.pathcomp_pb2 import Algorithm_KDisjointPath, Algorithm_KShortestPath, PathCompReply, PathCompRequest
+from common.proto.context_pb2 import Link
+from common.proto.pathcomp_pb2 import Algorithm_KDisjointPath, Algorithm_KShortestPath, PathCompRequest
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from pathcomp.frontend.service.algorithms.tools.ComputeSubServices import convert_explicit_path_hops_to_connections
-from pathcomp.frontend.service.algorithms.tools.EroPathToHops import eropath_to_hops
-from ._Algorithm import _Algorithm
+from ._Algorithm import _Algorithm, SRC_END
 from .KShortestPathAlgorithm import KShortestPathAlgorithm
 
 Service_Id          = Tuple[str, str]   # (context_uuid, service_uuid)
@@ -100,7 +98,7 @@ class KDisjointPathAlgorithm(_Algorithm):
     def get_link_from_endpoint(self, endpoint : Dict) -> Tuple[Dict, Link]:
         device_uuid = endpoint['device_id']
         endpoint_uuid = endpoint['endpoint_uuid']
-        item = self.endpoint_to_link_dict.get((device_uuid, endpoint_uuid))
+        item = self.endpoint_to_link_dict.get((device_uuid, endpoint_uuid, SRC_END))
         if item is None:
             MSG = 'Link for Endpoint({:s}, {:s}) not found'
             self.logger.warning(MSG.format(device_uuid, endpoint_uuid))
@@ -141,7 +139,7 @@ class KDisjointPathAlgorithm(_Algorithm):
 
         Path = List[Dict]
         Path_NoPath = Optional[Path] # None = no path, list = path
-        self.json_reply : Dict[Tuple[str, str], List[Path_NoPath]] = dict()
+        service_to_paths : Dict[Tuple[str, str], List[Path_NoPath]] = dict()
 
         for num_path in range(self.num_disjoint):
             algorithm.service_list = list()
@@ -189,66 +187,25 @@ class KDisjointPathAlgorithm(_Algorithm):
             for response in response_list:
                 service_id = response['serviceId']
                 service_key = (service_id['contextId'], service_id['service_uuid'])
-                json_reply_service = self.json_reply.setdefault(service_key, list())
+                json_reply_service = service_to_paths.setdefault(service_key, list())
 
                 no_path_issue = response.get('noPath', {}).get('issue')
-                if no_path_issue is not None:
-                    json_reply_service.append(None)
-                    continue
+                if no_path_issue is not None: continue
 
-                path_endpoints = response['path'][0]['devices']
+                path_endpoints = response['path'][0]
                 json_reply_service.append(path_endpoints)
-                algorithm.link_list = self.remove_traversed_links(algorithm.link_list, path_endpoints)
+                algorithm.link_list = self.remove_traversed_links(algorithm.link_list, path_endpoints['devices'])
+
+        self.json_reply = dict()
+        response_list = self.json_reply.setdefault('response-list', [])
+        for service_key,paths in service_to_paths.items():
+            response = {'serviceId': {
+                'contextId': service_key[0],
+                'service_uuid': service_key[1],
+            }}
+            response['path'] = paths
+            if len(paths) < self.num_disjoint:
+                response['noPath'] = {'issue': 1}
+            response_list.append(response)
 
         self.logger.debug('self.json_reply = {:s}'.format(str(self.json_reply)))
-
-    def get_reply(self) -> PathCompReply:
-        reply = PathCompReply()
-        grpc_services : Dict[Tuple[str, str], Service] = {}
-        grpc_connections : Dict[Tuple[int, str], Connection] = {}
-        for service_key,paths in self.json_reply.items():
-            context_uuid, service_uuid = service_key
-
-            grpc_services[service_key] = self.add_service_to_reply(reply, context_uuid, service_uuid)
-
-            for num_path,service_path_ero in enumerate(paths):
-                self.logger.warning('num_path={:d}'.format(num_path))
-                self.logger.warning('service_path_ero={:s}'.format(str(service_path_ero)))
-                if service_path_ero is None: continue
-                path_hops = eropath_to_hops(service_path_ero, self.endpoint_to_link_dict)
-                self.logger.warning('path_hops={:s}'.format(str(path_hops)))
-                connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid)
-                self.logger.warning('connections={:s}'.format(str(connections)))
-
-                for connection in connections:
-                    connection_uuid,device_layer,path_hops,_ = connection
-
-                    service_key = (context_uuid, connection_uuid)
-                    grpc_service = grpc_services.get(service_key)
-                    if grpc_service is not None: continue
-                    grpc_service = self.add_service_to_reply(
-                        reply, context_uuid, connection_uuid, device_layer=device_layer, path_hops=path_hops)
-                    grpc_services[service_key] = grpc_service
-
-                for connection in connections:
-                    connection_uuid,device_layer,path_hops,dependencies = connection
-
-                    service_key = (context_uuid, connection_uuid)
-                    grpc_service = grpc_services.get(service_key)
-                    if grpc_service is None: raise Exception('Service({:s}) not found'.format(str(service_key)))
-
-                    connection_uuid = '{:s}:{:d}'.format(connection_uuid, num_path)
-                    grpc_connection = grpc_connections.get(connection_uuid)
-                    if grpc_connection is not None: continue
-                    grpc_connection = self.add_connection_to_reply(reply, connection_uuid, grpc_service, path_hops)
-                    grpc_connections[connection_uuid] = grpc_connection
-
-                    for sub_service_uuid in dependencies:
-                        sub_service_key = (context_uuid, sub_service_uuid)
-                        grpc_sub_service = grpc_services.get(sub_service_key)
-                        if grpc_sub_service is None:
-                            raise Exception('Service({:s}) not found'.format(str(sub_service_key)))
-                        grpc_sub_service_id = grpc_connection.sub_service_ids.add()
-                        grpc_sub_service_id.CopyFrom(grpc_sub_service.service_id)
-
-        return reply
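To make the new reply-assembly step concrete, a small self-contained sketch (service and path values are hypothetical) of the 'response-list' structure the K-Disjoint algorithm now emits for the generic get_reply() in _Algorithm to consume:

    num_disjoint = 2
    service_to_paths = {
        ('admin', 'svc-1'): [
            {'devices': [{'device_id': 'R1', 'endpoint_uuid': 'eth0'}]},  # only 1 path found
        ],
    }
    json_reply = {'response-list': []}
    for (context_uuid, service_uuid), paths in service_to_paths.items():
        response = {'serviceId': {'contextId': context_uuid, 'service_uuid': service_uuid}}
        response['path'] = paths
        if len(paths) < num_disjoint:
            response['noPath'] = {'issue': 1}  # fewer disjoint paths than requested
        json_reply['response-list'].append(response)
    print(json_reply)
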
diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
index b6316774921171eb8ed6cf3faafd4b607bdcb831..b486ec1b59457b1ac575fb6197c7713b10c306e3 100644
--- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
@@ -15,17 +15,20 @@
 import json, logging, requests
 from typing import Dict, List, Optional, Tuple, Union
 from common.proto.context_pb2 import (
-    ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum,
-    ServiceTypeEnum)
+    Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum)
 from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
-from common.tools.object_factory.ConfigRule import json_config_rule_set
 from pathcomp.frontend.Config import BACKEND_URL
-from pathcomp.frontend.service.algorithms.tools.ConstantsMappings import DEVICE_LAYER_TO_SERVICE_TYPE, DeviceLayerEnum
 from .tools.EroPathToHops import eropath_to_hops
+from .tools.ComposeConfigRules import (
+    compose_device_config_rules, compose_l2nm_config_rules, compose_l3nm_config_rules, compose_tapi_config_rules)
 from .tools.ComposeRequest import compose_device, compose_link, compose_service
 from .tools.ComputeSubServices import (
     convert_explicit_path_hops_to_connections, convert_explicit_path_hops_to_plain_connection)
 
+SRC_END = 'src'
+DST_END = 'dst'
+SENSE = [SRC_END, DST_END]
+
 class _Algorithm:
     def __init__(self, algorithm_id : str, sync_paths : bool, class_name=__name__) -> None:
         # algorithm_id: algorithm to be executed
@@ -45,7 +48,7 @@ class _Algorithm:
         self.endpoint_name_mapping : Dict[Tuple[str, str], str] = dict()
         self.link_list : List[Dict] = list()
         self.link_dict : Dict[str, Tuple[Dict, Link]] = dict()
-        self.endpoint_to_link_dict : Dict[Tuple[str, str], Tuple[Dict, Link]] = dict()
+        self.endpoint_to_link_dict : Dict[Tuple[str, str, str], Tuple[Dict, Link]] = dict()
         self.service_list : List[Dict] = list()
         self.service_dict : Dict[Tuple[str, str], Tuple[Dict, Service]] = dict()
 
@@ -61,6 +64,7 @@ class _Algorithm:
             _device_uuid = grpc_device.device_id.device_uuid.uuid
             _device_name = grpc_device.name
             self.device_name_mapping[_device_name] = _device_uuid
+            self.device_name_mapping[_device_uuid] = _device_uuid
 
             device_endpoint_dict : Dict[str, Tuple[Dict, EndPointId]] = dict()
             for json_endpoint,grpc_endpoint in zip(json_device['device_endpoints'], grpc_device.device_endpoints):
@@ -72,12 +76,16 @@ class _Algorithm:
                 _endpoint_name = grpc_endpoint.name
                 self.endpoint_name_mapping[(_device_uuid, _endpoint_name)] = _endpoint_uuid
                 self.endpoint_name_mapping[(_device_name, _endpoint_name)] = _endpoint_uuid
+                self.endpoint_name_mapping[(_device_uuid, _endpoint_uuid)] = _endpoint_uuid
+                self.endpoint_name_mapping[(_device_name, _endpoint_uuid)] = _endpoint_uuid
 
             self.endpoint_dict[device_uuid] = device_endpoint_dict
 
     def add_links(self, grpc_links : Union[List[Link], LinkList]) -> None:
         if isinstance(grpc_links, LinkList): grpc_links = grpc_links.links
         for grpc_link in grpc_links:
+            if 'mgmt' in grpc_link.name.lower(): continue
+
             json_link = compose_link(grpc_link)
             if len(json_link['link_endpoint_ids']) != 2: continue
             self.link_list.append(json_link)
@@ -85,11 +93,11 @@ class _Algorithm:
             link_uuid = json_link['link_Id']
             self.link_dict[link_uuid] = (json_link, grpc_link)
 
-            for link_endpoint_id in json_link['link_endpoint_ids']:
+            for i,link_endpoint_id in enumerate(json_link['link_endpoint_ids']):
                 link_endpoint_id = link_endpoint_id['endpoint_id']
                 device_uuid = link_endpoint_id['device_id']
                 endpoint_uuid = link_endpoint_id['endpoint_uuid']
-                endpoint_key = (device_uuid, endpoint_uuid)
+                endpoint_key = (device_uuid, endpoint_uuid, SENSE[i])
                 link_tuple = (json_link, grpc_link)
                 self.endpoint_to_link_dict[endpoint_key] = link_tuple
 
@@ -148,9 +156,8 @@ class _Algorithm:
         return connection
 
     def add_service_to_reply(
-        self, reply : PathCompReply, context_uuid : str, service_uuid : str,
-        device_layer : Optional[DeviceLayerEnum] = None, path_hops : List[Dict] = [],
-        config_rules : List = []
+        self, reply : PathCompReply, context_uuid : str, service_uuid : str, service_type : ServiceTypeEnum,
+        path_hops : List[Dict] = [], config_rules : List = []
     ) -> Service:
         # TODO: implement support for multi-point services
         # Control deactivated to enable disjoint paths with multiple redundant endpoints on each side
@@ -159,44 +166,43 @@ class _Algorithm:
 
         service_key = (context_uuid, service_uuid)
         tuple_service = self.service_dict.get(service_key)
-        if tuple_service is not None:
-            service = reply.services.add()
-            service.CopyFrom(tuple_service[1])
+
+        service = reply.services.add()
+        service.service_id.context_id.context_uuid.uuid = context_uuid
+        service.service_id.service_uuid.uuid = service_uuid
+        service.service_type = service_type
+
+        if service_type == ServiceTypeEnum.SERVICETYPE_L2NM:
+            compose_l2nm_config_rules(config_rules, service.service_config.config_rules)
+        elif service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
+            compose_l3nm_config_rules(config_rules, service.service_config.config_rules)
+        elif service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
+            compose_tapi_config_rules(config_rules, service.service_config.config_rules)
         else:
-            service = reply.services.add()
-            service.service_id.context_id.context_uuid.uuid = context_uuid
-            service.service_id.service_uuid.uuid = service_uuid
-
-            if device_layer is not None:
-                service_type = DEVICE_LAYER_TO_SERVICE_TYPE.get(device_layer.value)
-                if service_type is None:
-                    MSG = 'Unable to map DeviceLayer({:s}) to ServiceType'
-                    raise Exception(MSG.format(str(device_layer)))
-                service.service_type = service_type
-
-                if service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
-                    json_tapi_settings = {
-                        'capacity_value'  : 50.0,
-                        'capacity_unit'   : 'GHz',
-                        'layer_proto_name': 'PHOTONIC_MEDIA',
-                        'layer_proto_qual': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC',
-                        'direction'       : 'UNIDIRECTIONAL',
-                    }
-                    config_rule = ConfigRule(**json_config_rule_set('/settings', json_tapi_settings))
-                    service.service_config.config_rules.append(config_rule)
-                else:
-                    service.service_config.config_rules.extend(config_rules)
+            MSG = 'Unhandled generic Config Rules for service {:s} {:s}'
+            self.logger.warning(MSG.format(str(service_uuid), str(ServiceTypeEnum.Name(service_type))))
 
-            service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
+        compose_device_config_rules(
+            config_rules, service.service_config.config_rules, path_hops,
+            self.device_name_mapping, self.endpoint_name_mapping)
 
-            if path_hops is not None and len(path_hops) > 0:
-                ingress_endpoint_id = service.service_endpoint_ids.add()
-                ingress_endpoint_id.device_id.device_uuid.uuid = path_hops[0]['device']
-                ingress_endpoint_id.endpoint_uuid.uuid = path_hops[0]['ingress_ep']
+        if path_hops is not None and len(path_hops) > 0:
+            ingress_endpoint_id = service.service_endpoint_ids.add()
+            ingress_endpoint_id.device_id.device_uuid.uuid = path_hops[0]['device']
+            ingress_endpoint_id.endpoint_uuid.uuid = path_hops[0]['ingress_ep']
 
-                egress_endpoint_id = service.service_endpoint_ids.add()
-                egress_endpoint_id.device_id.device_uuid.uuid = path_hops[-1]['device']
-                egress_endpoint_id.endpoint_uuid.uuid = path_hops[-1]['egress_ep']
+            egress_endpoint_id = service.service_endpoint_ids.add()
+            egress_endpoint_id.device_id.device_uuid.uuid = path_hops[-1]['device']
+            egress_endpoint_id.endpoint_uuid.uuid = path_hops[-1]['egress_ep']
+
+        if tuple_service is None:
+            service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
+        else:
+            service.name = tuple_service[1].name
+            service.service_status.CopyFrom(tuple_service[1].service_status)
+            service.timestamp.CopyFrom(tuple_service[1].timestamp)
+            for constraint in tuple_service[1].service_constraints:
+                service.service_constraints.add().CopyFrom(constraint)
 
         return service
 
@@ -206,41 +212,54 @@ class _Algorithm:
         grpc_services : Dict[Tuple[str, str], Service] = {}
         grpc_connections : Dict[str, Connection] = {}
         for response in response_list:
-            service_id = response['serviceId']
-            context_uuid = service_id['contextId']
-            service_uuid = service_id['service_uuid']
-            service_key = (context_uuid, service_uuid)
-            upper_service = self.add_service_to_reply(reply, context_uuid, service_uuid)
-            grpc_services[service_key] = upper_service
+            orig_service_id = response['serviceId']
+            context_uuid = orig_service_id['contextId']
+            main_service_uuid = orig_service_id['service_uuid']
+            orig_service_key = (context_uuid, main_service_uuid)
+            _,grpc_orig_service = self.service_dict[orig_service_key]
+            main_service_type = grpc_orig_service.service_type
 
             no_path_issue = response.get('noPath', {}).get('issue')
             if no_path_issue is not None:
                 # no path found: leave connection with no endpoints
                 # no_path_issue == 1 => no path due to a constraint
+                grpc_services[orig_service_key] = grpc_orig_service
                 continue
 
+            orig_config_rules = grpc_orig_service.service_config.config_rules
+
             for service_path_ero in response['path']:
+                self.logger.debug('service_path_ero["devices"] = {:s}'.format(str(service_path_ero['devices'])))
+                _endpoint_to_link_dict = {k:v[0] for k,v in self.endpoint_to_link_dict.items()}
+                self.logger.debug('self.endpoint_to_link_dict = {:s}'.format(str(_endpoint_to_link_dict)))
                 path_hops = eropath_to_hops(service_path_ero['devices'], self.endpoint_to_link_dict)
+                self.logger.debug('path_hops = {:s}'.format(str(path_hops)))
                 try:
-                    connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid)
+                    _device_dict = {k:v[0] for k,v in self.device_dict.items()}
+                    self.logger.debug('self.device_dict = {:s}'.format(str(_device_dict)))
+                    connections = convert_explicit_path_hops_to_connections(
+                        path_hops, self.device_dict, main_service_uuid, main_service_type)
+                    self.logger.debug('EXTRAPOLATED connections = {:s}'.format(str(connections)))
                 except: # pylint: disable=bare-except
-                    # if not able to extrapolate sub-services and sub-connections,
-                    # assume single service and single connection
-                    connections = convert_explicit_path_hops_to_plain_connection(path_hops, service_uuid)
+                    MSG = ' '.join([
+                        'Unable to extrapolate sub-services and sub-connections.',
+                        'Assuming single-service and single-connection.',
+                    ])
+                    self.logger.exception(MSG)
+                    connections = convert_explicit_path_hops_to_plain_connection(
+                        path_hops, main_service_uuid, main_service_type)
+                    self.logger.debug('BASIC connections = {:s}'.format(str(connections)))
 
                 for connection in connections:
-                    connection_uuid,device_layer,path_hops,_ = connection
+                    connection_uuid,service_type,path_hops,_ = connection
                     service_key = (context_uuid, connection_uuid)
-                    grpc_service = grpc_services.get(service_key)
-                    if grpc_service is None:
-                        config_rules = upper_service.service_config.config_rules
-                        grpc_service = self.add_service_to_reply(
-                            reply, context_uuid, connection_uuid, device_layer=device_layer, path_hops=path_hops,
-                            config_rules=config_rules)
-                        grpc_services[service_key] = grpc_service
+                    grpc_service = self.add_service_to_reply(
+                        reply, context_uuid, connection_uuid, service_type, path_hops=path_hops,
+                        config_rules=orig_config_rules)
+                    grpc_services[service_key] = grpc_service
 
                 for connection in connections:
-                    connection_uuid,device_layer,path_hops,dependencies = connection
+                    connection_uuid,_,path_hops,dependencies = connection
 
                     service_key = (context_uuid, connection_uuid)
                     grpc_service = grpc_services.get(service_key)
@@ -251,8 +270,8 @@ class _Algorithm:
                     grpc_connection = self.add_connection_to_reply(reply, connection_uuid, grpc_service, path_hops)
                     grpc_connections[connection_uuid] = grpc_connection
 
-                    for service_uuid in dependencies:
-                        sub_service_key = (context_uuid, service_uuid)
+                    for sub_service_uuid in dependencies:
+                        sub_service_key = (context_uuid, sub_service_uuid)
                         grpc_sub_service = grpc_services.get(sub_service_key)
                         if grpc_sub_service is None:
                             raise Exception('Service({:s}) not found'.format(str(sub_service_key)))
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py
new file mode 100644
index 0000000000000000000000000000000000000000..91367e23f29a02aa3e9605fcd0d2864b9191d800
--- /dev/null
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py
@@ -0,0 +1,101 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import itertools, json, re
+from typing import Dict, List, Optional, Tuple
+from common.proto.context_pb2 import ConfigRule
+from common.tools.object_factory.ConfigRule import json_config_rule_set
+
+SETTINGS_RULE_NAME = '/settings'
+
+DEV_EP_SETTINGS = re.compile(r'\/device\[([^\]]+)\]\/endpoint\[([^\]]+)\]\/settings')
+
+L2NM_SETTINGS_FIELD_DEFAULTS = {
+    'encapsulation_type': 'dot1q',
+    'vlan_id'           : 100,
+    'mtu'               : 1450,
+}
+
+L3NM_SETTINGS_FIELD_DEFAULTS = {
+    'encapsulation_type': 'dot1q',
+    'vlan_id'           : 100,
+    'mtu'               : 1450,
+}
+
+TAPI_SETTINGS_FIELD_DEFAULTS = {
+    'capacity_value'  : 50.0,
+    'capacity_unit'   : 'GHz',
+    'layer_proto_name': 'PHOTONIC_MEDIA',
+    'layer_proto_qual': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC',
+    'direction'       : 'UNIDIRECTIONAL',
+}
+
+def find_custom_config_rule(config_rules : List, resource_name : str) -> Optional[Dict]:
+    resource_value : Optional[Dict] = None
+    for config_rule in config_rules:
+        if config_rule.WhichOneof('config_rule') != 'custom': continue
+        if config_rule.custom.resource_key != resource_name: continue
+        resource_value = json.loads(config_rule.custom.resource_value)
+    return resource_value
+
+def compose_config_rules(
+    main_service_config_rules : List, subservice_config_rules : List, field_defaults : Dict
+) -> None:
+    settings = find_custom_config_rule(main_service_config_rules, SETTINGS_RULE_NAME)
+    if settings is None: return
+
+    json_settings = {}
+    for field_name,default_value in field_defaults.items():
+        json_settings[field_name] = settings.get(field_name, default_value)
+
+    config_rule = ConfigRule(**json_config_rule_set('/settings', json_settings))
+    subservice_config_rules.append(config_rule)
+
+def compose_l2nm_config_rules(main_service_config_rules : List, subservice_config_rules : List) -> None:
+    compose_config_rules(main_service_config_rules, subservice_config_rules, L2NM_SETTINGS_FIELD_DEFAULTS)
+
+def compose_l3nm_config_rules(main_service_config_rules : List, subservice_config_rules : List) -> None:
+    compose_config_rules(main_service_config_rules, subservice_config_rules, L3NM_SETTINGS_FIELD_DEFAULTS)
+
+def compose_tapi_config_rules(main_service_config_rules : List, subservice_config_rules : List) -> None:
+    compose_config_rules(main_service_config_rules, subservice_config_rules, TAPI_SETTINGS_FIELD_DEFAULTS)
+
+def compose_device_config_rules(
+    config_rules : List, subservice_config_rules : List, path_hops : List,
+    device_name_mapping : Dict[str, str], endpoint_name_mapping : Dict[Tuple[str, str], str]
+) -> None:
+
+    endpoints_traversed = set()
+    for path_hop in path_hops:
+        device_uuid_or_name = path_hop['device']
+        endpoints_traversed.add((device_uuid_or_name, path_hop['ingress_ep']))
+        endpoints_traversed.add((device_uuid_or_name, path_hop['egress_ep']))
+
+    for config_rule in config_rules:
+        if config_rule.WhichOneof('config_rule') != 'custom': continue
+        match = DEV_EP_SETTINGS.match(config_rule.custom.resource_key)
+        if match is None: continue
+
+        device_uuid_or_name = match.group(1)
+        device_name_or_uuid = device_name_mapping[device_uuid_or_name]
+        device_keys = {device_uuid_or_name, device_name_or_uuid}
+
+        endpoint_uuid_or_name = match.group(2)
+        endpoint_name_or_uuid_1 = endpoint_name_mapping[(device_uuid_or_name, endpoint_uuid_or_name)]
+        endpoint_name_or_uuid_2 = endpoint_name_mapping[(device_name_or_uuid, endpoint_uuid_or_name)]
+        endpoint_keys = {endpoint_uuid_or_name, endpoint_name_or_uuid_1, endpoint_name_or_uuid_2}
+
+        device_endpoint_keys = set(itertools.product(device_keys, endpoint_keys))
+        if len(device_endpoint_keys.intersection(endpoints_traversed)) == 0: continue
+        subservice_config_rules.append(config_rule)
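A plain-dict illustration of the defaults-merge that compose_config_rules performs on the '/settings' rule (protobuf plumbing omitted; the example values are hypothetical):

    L2NM_DEFAULTS = {'encapsulation_type': 'dot1q', 'vlan_id': 100, 'mtu': 1450}

    def merge_settings(main_settings, field_defaults):
        # Fields present in the main service's '/settings' rule win;
        # anything missing falls back to the per-type default.
        return {name: main_settings.get(name, default) for name, default in field_defaults.items()}

    print(merge_settings({'vlan_id': 300}, L2NM_DEFAULTS))
    # -> {'encapsulation_type': 'dot1q', 'vlan_id': 300, 'mtu': 1450}
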
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
index ee85f0bb083500c655e78798bbcd2bd00e8a4501..e2c6dc13804703d89242b27156763ce887aa4884 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
@@ -118,11 +118,11 @@ def compose_link(grpc_link : Link) -> Dict:
         for link_endpoint_id in grpc_link.link_endpoint_ids
     ]
 
-    forwarding_direction = LinkForwardingDirection.BIDIRECTIONAL.value
+    forwarding_direction = LinkForwardingDirection.UNIDIRECTIONAL.value
     total_potential_capacity = compose_capacity(200, CapacityUnit.MBPS.value)
     available_capacity = compose_capacity(200, CapacityUnit.MBPS.value)
     cost_characteristics = compose_cost_characteristics('linkcost', '1', '0')
-    latency_characteristics = compose_latency_characteristics('2')
+    latency_characteristics = compose_latency_characteristics('1')
 
     return {
         'link_Id': link_uuid, 'link_endpoint_ids': endpoint_ids, 'forwarding_direction': forwarding_direction,
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py
index b92a19b52c4887e01f7f1bc58de897c783683eeb..40cb0857617983df4cfd926baebcbff85e169894 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py
@@ -30,55 +30,77 @@
 # ]
 #
 # connections=[
-#     (UUID('7548edf7-ee7c-4adf-ac0f-c7a0c0dfba8e'), <DeviceLayerEnum.OPTICAL_CONTROLLER: 1>, [
+#     (UUID('7548edf7-ee7c-4adf-ac0f-c7a0c0dfba8e'), ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, [
 #             {'device': 'TN-OLS', 'ingress_ep': '833760219d0f', 'egress_ep': 'cf176771a4b9'}
 #         ], []),
-#     (UUID('c2e57966-5d82-4705-a5fe-44cf6487219e'), <DeviceLayerEnum.PACKET_DEVICE: 30>, [
+#     (UUID('c2e57966-5d82-4705-a5fe-44cf6487219e'), ServiceTypeEnum.SERVICETYPE_L2NM, [
 #             {'device': 'CS1-GW1', 'ingress_ep': '10/1', 'egress_ep': '1/2'},
 #             {'device': 'TN-R2', 'ingress_ep': '1/2', 'egress_ep': '2/1'},
 #             {'device': 'TN-R3', 'ingress_ep': '2/1', 'egress_ep': '1/1'},
 #             {'device': 'CS2-GW1', 'ingress_ep': '1/1', 'egress_ep': '10/1'}
 #         ], [UUID('7548edf7-ee7c-4adf-ac0f-c7a0c0dfba8e')]),
-#     (UUID('1e205c82-f6ea-4977-9e97-dc27ef1f4802'), <DeviceLayerEnum.APPLICATION_DEVICE: 40>, [
+#     (UUID('1e205c82-f6ea-4977-9e97-dc27ef1f4802'), ServiceTypeEnum.SERVICETYPE_L2NM, [
 #             {'device': 'DC1-GW', 'ingress_ep': 'int', 'egress_ep': 'eth1'},
 #             {'device': 'DC2-GW', 'ingress_ep': 'eth1', 'egress_ep': 'int'}
 #         ], [UUID('c2e57966-5d82-4705-a5fe-44cf6487219e')])
 # ]
 
-import queue, uuid
-from typing import Dict, List, Tuple
-from common.proto.context_pb2 import Device
-from .ConstantsMappings import DEVICE_TYPE_TO_LAYER, DeviceLayerEnum
+import logging, queue, uuid
+from typing import Dict, List, Optional, Tuple
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import Device, ServiceTypeEnum
+from .ResourceGroups import IGNORED_DEVICE_TYPES, get_resource_classification
+from .ServiceTypes import get_service_type
+
+LOGGER = logging.getLogger(__name__)
 
 def convert_explicit_path_hops_to_connections(
-    path_hops : List[Dict], device_dict : Dict[str, Tuple[Dict, Device]], main_connection_uuid : str
-) -> List[Tuple[str, DeviceLayerEnum, List[str], List[str]]]:
+    path_hops : List[Dict], device_dict : Dict[str, Tuple[Dict, Device]],
+    main_service_uuid : str, main_service_type : ServiceTypeEnum
+) -> List[Tuple[str, int, List[str], List[str]]]:
+
+    LOGGER.debug('path_hops={:s}'.format(str(path_hops)))
 
     connection_stack = queue.LifoQueue()
-    connections : List[Tuple[str, DeviceLayerEnum, List[str], List[str]]] = list()
-    old_device_layer = None
-    last_device_uuid = None
+    connections : List[Tuple[str, int, List[str], List[str]]] = list()
+    prv_device_uuid = None
+    prv_res_class : Tuple[Optional[int], Optional[DeviceTypeEnum], Optional[str]] = None, None, None
+
     for path_hop in path_hops:
         device_uuid = path_hop['device']
-        if last_device_uuid == device_uuid: continue
+        if prv_device_uuid == device_uuid: continue
         device_tuple = device_dict.get(device_uuid)
         if device_tuple is None: raise Exception('Device({:s}) not found'.format(str(device_uuid)))
-        json_device,_ = device_tuple
-        device_type = json_device['device_type']
-        device_layer = DEVICE_TYPE_TO_LAYER.get(device_type)
-        if device_layer is None: raise Exception('Undefined Layer for DeviceType({:s})'.format(str(device_type)))
+        _,grpc_device = device_tuple
 
-        if old_device_layer is None:
+        res_class = get_resource_classification(grpc_device, device_dict)
+        if res_class[1] in IGNORED_DEVICE_TYPES: continue
+
+        if prv_res_class[0] is None:
             # path ingress
-            connection_stack.put((main_connection_uuid, device_layer, [path_hop], []))
-        elif old_device_layer > device_layer:
-            # underlying connection begins
+            connection_stack.put((main_service_uuid, main_service_type, [path_hop], []))
+        elif prv_res_class[0] > res_class[0]:
+            # create underlying connection
             connection_uuid = str(uuid.uuid4())
-            connection_stack.put((connection_uuid, device_layer, [path_hop], []))
-        elif old_device_layer == device_layer:
-            # same connection continues
-            connection_stack.queue[-1][2].append(path_hop)
-        elif old_device_layer < device_layer:
+            prv_service_type = connection_stack.queue[-1][1]
+            service_type = get_service_type(res_class[1], prv_service_type)
+            connection_stack.put((connection_uuid, service_type, [path_hop], []))
+        elif prv_res_class[0] == res_class[0]:
+            # same resource group kind
+            if prv_res_class[1] == res_class[1] and prv_res_class[2] == res_class[2]:
+                # same device type and device controller: connection continues
+                connection_stack.queue[-1][2].append(path_hop)
+            else:
+                # different device type or device controller: chain connections
+                connection = connection_stack.get()
+                connections.append(connection)
+                connection_stack.queue[-1][3].append(connection[0])
+
+                connection_uuid = str(uuid.uuid4())
+                prv_service_type = connection_stack.queue[-1][1]
+                service_type = get_service_type(res_class[1], prv_service_type)
+                connection_stack.put((connection_uuid, service_type, [path_hop], []))
+        elif prv_res_class[0] < res_class[0]:
             # underlying connection ended
             connection = connection_stack.get()
             connections.append(connection)
@@ -87,26 +109,27 @@ def convert_explicit_path_hops_to_connections(
         else:
             raise Exception('Uncontrolled condition')
 
-        old_device_layer = device_layer
-        last_device_uuid = device_uuid
+        prv_device_uuid = device_uuid
+        prv_res_class = res_class
 
     # path egress
     connections.append(connection_stack.get())
+    LOGGER.debug('connections={:s}'.format(str(connections)))
     assert connection_stack.empty()
     return connections
 
 def convert_explicit_path_hops_to_plain_connection(
-    path_hops : List[Dict], main_connection_uuid : str
-) -> List[Tuple[str, DeviceLayerEnum, List[str], List[str]]]:
+    path_hops : List[Dict], main_service_uuid : str, main_service_type : ServiceTypeEnum
+) -> List[Tuple[str, int, List[str], List[str]]]:
 
-    connection : Tuple[str, DeviceLayerEnum, List[str], List[str]] = \
-        (main_connection_uuid, DeviceLayerEnum.PACKET_DEVICE, [], [])
+    connection : Tuple[str, int, List[str], List[str]] = \
+        (main_service_uuid, main_service_type, [], [])
 
-    last_device_uuid = None
+    prv_device_uuid = None
     for path_hop in path_hops:
         device_uuid = path_hop['device']
-        if last_device_uuid == device_uuid: continue
+        if prv_device_uuid == device_uuid: continue
         connection[2].append(path_hop)
-        last_device_uuid = device_uuid
+        prv_device_uuid = device_uuid
 
     return [connection]
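A toy, self-contained walk-through of the LIFO-stack logic in convert_explicit_path_hops_to_connections, using deepness values from ResourceGroups (packet router 70, open line system 30); devices and tuple layout are simplified for illustration:

    import queue, uuid

    hops = [('CS1-GW1', 70), ('TN-OLS', 30), ('CS2-GW1', 70)]  # (device, deepness)
    stack, connections, prv = queue.LifoQueue(), [], None
    for device, deepness in hops:
        if prv is None:
            stack.put(('main-service', [device], []))       # path ingress
        elif prv > deepness:
            stack.put((str(uuid.uuid4()), [device], []))    # underlying connection begins
        elif prv == deepness:
            stack.queue[-1][1].append(device)               # same connection continues
        else:
            conn = stack.get(); connections.append(conn)    # underlying connection ended
            stack.queue[-1][2].append(conn[0])              # record the dependency
            stack.queue[-1][1].append(device)
        prv = deepness
    connections.append(stack.get())                          # path egress
    # connections: the TN-OLS sub-connection first, then the main connection
    # depending on it, mirroring the example in the header comment above.
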
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
index cd1956a873dd2170c7a75db0c677db34162449ee..bd06e6ba19b3da9e2d38d5b83e1d7d3a806ff14f 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
@@ -13,8 +13,6 @@
 # limitations under the License.
 
 from enum import IntEnum
-from common.DeviceTypes import DeviceTypeEnum
-from common.proto.context_pb2 import ServiceTypeEnum
 
 class CapacityUnit(IntEnum):
     TB   = 0
@@ -66,50 +64,3 @@ class LinkForwardingDirection(IntEnum):
     BIDIRECTIONAL  = 0
     UNIDIRECTIONAL = 1
     UNKNOWN        = 2
-
-class DeviceLayerEnum(IntEnum):
-    APPLICATION_CONTROLLER = 41     # Layer 4 domain controller
-    APPLICATION_DEVICE     = 40     # Layer 4 domain device
-    PACKET_CONTROLLER      = 31     # Layer 3 domain controller
-    PACKET_DEVICE          = 30     # Layer 3 domain device
-    MAC_LAYER_CONTROLLER   = 21     # Layer 2 domain controller
-    MAC_LAYER_DEVICE       = 20     # Layer 2 domain device
-    OPTICAL_CONTROLLER     =  1     # Layer 0 domain controller
-    OPTICAL_DEVICE         =  0     # Layer 0 domain device
-
-DEVICE_TYPE_TO_LAYER = {
-    DeviceTypeEnum.EMULATED_DATACENTER.value             : DeviceLayerEnum.APPLICATION_DEVICE,
-    DeviceTypeEnum.DATACENTER.value                      : DeviceLayerEnum.APPLICATION_DEVICE,
-    DeviceTypeEnum.NETWORK.value                         : DeviceLayerEnum.APPLICATION_DEVICE,
-
-    DeviceTypeEnum.EMULATED_PACKET_ROUTER.value          : DeviceLayerEnum.PACKET_DEVICE,
-    DeviceTypeEnum.PACKET_ROUTER.value                   : DeviceLayerEnum.PACKET_DEVICE,
-    DeviceTypeEnum.EMULATED_PACKET_SWITCH.value          : DeviceLayerEnum.MAC_LAYER_DEVICE,
-    DeviceTypeEnum.PACKET_SWITCH.value                   : DeviceLayerEnum.MAC_LAYER_DEVICE,
-
-    DeviceTypeEnum.EMULATED_P4_SWITCH.value              : DeviceLayerEnum.MAC_LAYER_DEVICE,
-    DeviceTypeEnum.P4_SWITCH.value                       : DeviceLayerEnum.MAC_LAYER_DEVICE,
-
-    DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : DeviceLayerEnum.MAC_LAYER_CONTROLLER,
-    DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value          : DeviceLayerEnum.MAC_LAYER_CONTROLLER,
-
-    DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value       : DeviceLayerEnum.OPTICAL_CONTROLLER,
-    DeviceTypeEnum.OPEN_LINE_SYSTEM.value                : DeviceLayerEnum.OPTICAL_CONTROLLER,
-    DeviceTypeEnum.XR_CONSTELLATION.value                : DeviceLayerEnum.OPTICAL_CONTROLLER,
-
-    DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value          : DeviceLayerEnum.OPTICAL_DEVICE,
-    DeviceTypeEnum.OPTICAL_ROADM.value                   : DeviceLayerEnum.OPTICAL_DEVICE,
-    DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER.value    : DeviceLayerEnum.OPTICAL_DEVICE,
-    DeviceTypeEnum.OPTICAL_TRANSPONDER.value             : DeviceLayerEnum.OPTICAL_DEVICE,
-}
-
-DEVICE_LAYER_TO_SERVICE_TYPE = {
-    DeviceLayerEnum.APPLICATION_DEVICE.value   : ServiceTypeEnum.SERVICETYPE_L3NM,
-    DeviceLayerEnum.PACKET_DEVICE.value        : ServiceTypeEnum.SERVICETYPE_L3NM,
-
-    DeviceLayerEnum.MAC_LAYER_CONTROLLER.value : ServiceTypeEnum.SERVICETYPE_L2NM,
-    DeviceLayerEnum.MAC_LAYER_DEVICE.value     : ServiceTypeEnum.SERVICETYPE_L2NM,
-
-    DeviceLayerEnum.OPTICAL_CONTROLLER.value   : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE,
-    DeviceLayerEnum.OPTICAL_DEVICE.value       : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE,
-}
diff --git a/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py b/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py
index c8a902999ddfb5011fd7ec09fa99ff6fa697ea40..670757d76b7d21ecf28f6ead4e8bc4e21951d18e 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py
@@ -43,13 +43,17 @@
 #
 
 import logging
-from typing import Dict, List
+from typing import Dict, List, Tuple
+from common.proto.context_pb2 import Link
 
 LOGGER = logging.getLogger(__name__)
 
-def eropath_to_hops(ero_path : List[Dict], endpoint_to_link_dict : Dict) -> List[Dict]:
+def eropath_to_hops(
+    ero_path : List[Dict], endpoint_to_link_dict : Dict[Tuple[str, str, str], Tuple[Dict, Link]]
+) -> List[Dict]:
     try:
         path_hops = []
+        num_ero_hops = len(ero_path)
         for endpoint in ero_path:
             device_uuid = endpoint['device_id']
             endpoint_uuid = endpoint['endpoint_uuid']
@@ -59,23 +63,17 @@ def eropath_to_hops(ero_path : List[Dict], endpoint_to_link_dict : Dict) -> List
                 continue
 
             last_hop = path_hops[-1]
-            if (last_hop['device'] == device_uuid):
-                if ('ingress_ep' not in last_hop) or ('egress_ep' in last_hop): continue
-                last_hop['egress_ep'] = endpoint_uuid
-                continue
+            if last_hop['device'] != device_uuid: raise Exception('Malformed path')
+            last_hop['egress_ep'] = endpoint_uuid
+
+            if num_ero_hops - 1 == len(path_hops): break
 
-            endpoint_key = (last_hop['device'], last_hop['egress_ep'])
-            link_tuple = endpoint_to_link_dict.get(endpoint_key)
-            ingress = next(iter([
-                ep_id for ep_id in link_tuple[0]['link_endpoint_ids']
-                if (ep_id['endpoint_id']['device_id'] == device_uuid) and\
-                    (ep_id['endpoint_id']['endpoint_uuid'] != endpoint_uuid)
-            ]), None)
-            if ingress['endpoint_id']['device_id'] != device_uuid: raise Exception('Malformed path')
+            link_tuple = endpoint_to_link_dict[(device_uuid, endpoint_uuid, 'src')]
+            if link_tuple is None: raise Exception('Malformed path')
+            ingress = link_tuple[0]['link_endpoint_ids'][-1]
             path_hops.append({
                 'device': ingress['endpoint_id']['device_id'],
-                'ingress_ep': ingress['endpoint_id']['endpoint_uuid'],
-                'egress_ep': endpoint_uuid,
+                'ingress_ep': ingress['endpoint_id']['endpoint_uuid']
             })
         return path_hops
     except:
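An approximate, self-contained reconstruction of the folding the rewritten eropath_to_hops performs; the ERO entry semantics assumed here (first entry carries the ingress of the first device, later entries carry egress endpoints) are an assumption for this sketch:

    endpoint_to_link = {  # toy directional link index, names hypothetical
        ('R1', 'eth1', 'src'): {'link_endpoint_ids': [
            {'endpoint_id': {'device_id': 'R1', 'endpoint_uuid': 'eth1'}},
            {'endpoint_id': {'device_id': 'R2', 'endpoint_uuid': 'eth1'}},
        ]},
    }

    def fold(ero_path):
        path_hops = [{'device': ero_path[0]['device_id'], 'ingress_ep': ero_path[0]['endpoint_uuid']}]
        for endpoint in ero_path[1:]:
            last_hop = path_hops[-1]
            if last_hop['device'] != endpoint['device_id']: raise Exception('Malformed path')
            last_hop['egress_ep'] = endpoint['endpoint_uuid']
            if len(path_hops) == len(ero_path) - 1: break
            link = endpoint_to_link[(endpoint['device_id'], endpoint['endpoint_uuid'], 'src')]
            ingress = link['link_endpoint_ids'][-1]['endpoint_id']  # far end -> next ingress
            path_hops.append({'device': ingress['device_id'], 'ingress_ep': ingress['endpoint_uuid']})
        return path_hops

    print(fold([
        {'device_id': 'R1', 'endpoint_uuid': 'eth0'},
        {'device_id': 'R1', 'endpoint_uuid': 'eth1'},
        {'device_id': 'R2', 'endpoint_uuid': 'eth2'},
    ]))
    # -> [{'device': 'R1', 'ingress_ep': 'eth0', 'egress_ep': 'eth1'},
    #     {'device': 'R2', 'ingress_ep': 'eth1', 'egress_ep': 'eth2'}]
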
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
new file mode 100644
index 0000000000000000000000000000000000000000..53c89cd124cb7d3431b37a50596b0b793cfa83eb
--- /dev/null
+++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
@@ -0,0 +1,94 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+from typing import Dict, Optional, Tuple
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import Device
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+DEVICE_TYPE_TO_DEEPNESS = {
+    DeviceTypeEnum.EMULATED_DATACENTER.value             : 90,
+    DeviceTypeEnum.DATACENTER.value                      : 90,
+    DeviceTypeEnum.NETWORK.value                         : 90,
+
+    DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value          : 80,
+    DeviceTypeEnum.EMULATED_PACKET_ROUTER.value          : 70,
+    DeviceTypeEnum.PACKET_ROUTER.value                   : 70,
+
+    DeviceTypeEnum.EMULATED_PACKET_SWITCH.value          : 60,
+    DeviceTypeEnum.PACKET_SWITCH.value                   : 60,
+    DeviceTypeEnum.EMULATED_P4_SWITCH.value              : 60,
+    DeviceTypeEnum.P4_SWITCH.value                       : 60,
+
+    DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 40,
+    DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value          : 40,
+
+    DeviceTypeEnum.EMULATED_XR_CONSTELLATION.value       : 40,
+    DeviceTypeEnum.XR_CONSTELLATION.value                : 40,
+
+    DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value       : 30,
+    DeviceTypeEnum.OPEN_LINE_SYSTEM.value                : 30,
+
+    DeviceTypeEnum.EMULATED_PACKET_RADIO_ROUTER.value    : 10,
+    DeviceTypeEnum.PACKET_RADIO_ROUTER.value             : 10,
+    DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER.value    : 10,
+    DeviceTypeEnum.OPTICAL_TRANSPONDER.value             : 10,
+    DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value          : 10,
+    DeviceTypeEnum.OPTICAL_ROADM.value                   : 10,
+
+    DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER.value       :  0,
+}
+
+IGNORED_DEVICE_TYPES = {DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER}
+
+def get_device_controller_uuid(
+    device : Device
+) -> Optional[str]:
+    for config_rule in device.device_config.config_rules:
+        if config_rule.WhichOneof('config_rule') != 'custom': continue
+        if config_rule.custom.resource_key != '_controller': continue
+        device_controller_id = json.loads(config_rule.custom.resource_value)
+        return device_controller_id['uuid']
+    return None
+
+def _map_device_type(device : Device) -> DeviceTypeEnum:
+    device_type = DeviceTypeEnum._value2member_map_.get(device.device_type) # pylint: disable=no-member
+    if device_type is None:
+        MSG = 'Unsupported DeviceType({:s}) for Device({:s})'
+        raise Exception(MSG.format(str(device.device_type), grpc_message_to_json_string(device)))
+    return device_type
+
+def _map_resource_to_deepness(device_type : DeviceTypeEnum) -> int:
+    deepness = DEVICE_TYPE_TO_DEEPNESS.get(device_type.value)
+    if deepness is None: raise Exception('Unsupported DeviceType({:s})'.format(str(device_type.value)))
+    return deepness
+
+def get_device_type(
+    device : Device, device_dict : Dict[str, Tuple[Dict, Device]], device_controller_uuid : Optional[str]
+) -> DeviceTypeEnum:
+    if device_controller_uuid is None: return _map_device_type(device)
+    device_controller_tuple = device_dict.get(device_controller_uuid)
+    if device_controller_tuple is None: raise Exception('Device({:s}) not found'.format(str(device_controller_uuid)))
+    _,device = device_controller_tuple
+    return _map_device_type(device)
+
+def get_resource_classification(
+    device : Device, device_dict : Dict[str, Tuple[Dict, Device]]
+) -> Tuple[int, DeviceTypeEnum, Optional[str]]:
+    device_controller_uuid = get_device_controller_uuid(device)
+    device_type = get_device_type(device, device_dict, device_controller_uuid)
+    resource_deepness = _map_resource_to_deepness(device_type)
+    return resource_deepness, device_type, device_controller_uuid
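The '_controller' lookup above drives the whole classification: a managed device is classified by its controller's type and deepness, so controller-managed hops group into one connection. A plain-dict sketch of that lookup (rule values hypothetical):

    import json

    config_rules = [
        {'kind': 'custom', 'resource_key': '_controller',
         'resource_value': json.dumps({'uuid': 'tfs-ctrl-1'})},
    ]

    def get_controller_uuid(config_rules):
        for rule in config_rules:
            if rule.get('kind') != 'custom': continue
            if rule['resource_key'] != '_controller': continue
            return json.loads(rule['resource_value'])['uuid']
        return None

    assert get_controller_uuid(config_rules) == 'tfs-ctrl-1'
    # A device managed by that controller would then classify as, e.g.,
    # (80, TERAFLOWSDN_CONTROLLER, 'tfs-ctrl-1') instead of (70, PACKET_ROUTER, None).
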
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py
new file mode 100644
index 0000000000000000000000000000000000000000..463b8039b6c8c611b579bdb74933c06fb0f99507
--- /dev/null
+++ b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py
@@ -0,0 +1,53 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import ServiceTypeEnum
+
+PACKET_DEVICE_TYPES = {
+    DeviceTypeEnum.TERAFLOWSDN_CONTROLLER,
+    DeviceTypeEnum.PACKET_ROUTER, DeviceTypeEnum.EMULATED_PACKET_ROUTER,
+    DeviceTypeEnum.PACKET_SWITCH, DeviceTypeEnum.EMULATED_PACKET_SWITCH,
+}
+
+L2_DEVICE_TYPES = {
+    DeviceTypeEnum.PACKET_SWITCH, DeviceTypeEnum.EMULATED_PACKET_SWITCH,
+    DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM, DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM,
+    DeviceTypeEnum.PACKET_RADIO_ROUTER, DeviceTypeEnum.EMULATED_PACKET_RADIO_ROUTER,
+    DeviceTypeEnum.P4_SWITCH, DeviceTypeEnum.EMULATED_P4_SWITCH,
+}
+
+OPTICAL_DEVICE_TYPES = {
+    DeviceTypeEnum.OPEN_LINE_SYSTEM, DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM,
+    DeviceTypeEnum.XR_CONSTELLATION, DeviceTypeEnum.EMULATED_XR_CONSTELLATION,
+    DeviceTypeEnum.OPTICAL_ROADM, DeviceTypeEnum.EMULATED_OPTICAL_ROADM,
+    DeviceTypeEnum.OPTICAL_TRANSPONDER, DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER,
+}
+
+SERVICE_TYPE_L2NM = {ServiceTypeEnum.SERVICETYPE_L2NM}
+SERVICE_TYPE_L3NM = {ServiceTypeEnum.SERVICETYPE_L3NM}
+SERVICE_TYPE_LXNM = {ServiceTypeEnum.SERVICETYPE_L3NM, ServiceTypeEnum.SERVICETYPE_L2NM}
+SERVICE_TYPE_TAPI = {ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE}
+
+def get_service_type(device_type : DeviceTypeEnum, prv_service_type : ServiceTypeEnum) -> ServiceTypeEnum:
+    if device_type in PACKET_DEVICE_TYPES and prv_service_type in SERVICE_TYPE_LXNM: return prv_service_type
+    if device_type in L2_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_L2NM
+    if device_type in OPTICAL_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE
+
+    str_fields = ', '.join([
+        'device_type={:s}'.format(str(device_type)),
+        'prv_service_type={:s}'.format(str(prv_service_type)),
+    ])
+    raise Exception('Undefined Service Type for ({:s})'.format(str_fields))
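
A minimal sketch of the mapping, grounded in the sets above (the import path matches this new module):

    from common.DeviceTypes import DeviceTypeEnum
    from common.proto.context_pb2 import ServiceTypeEnum
    from pathcomp.frontend.service.algorithms.tools.ServiceTypes import get_service_type

    # An L2NM/L3NM request keeps its type across packet devices...
    assert get_service_type(
        DeviceTypeEnum.PACKET_ROUTER, ServiceTypeEnum.SERVICETYPE_L3NM
    ) == ServiceTypeEnum.SERVICETYPE_L3NM
    # ...becomes TAPI when the path enters the optical domain...
    assert get_service_type(
        DeviceTypeEnum.OPEN_LINE_SYSTEM, ServiceTypeEnum.SERVICETYPE_L3NM
    ) == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE
    # ...and becomes L2NM again over L2-only devices such as P4 switches.
    assert get_service_type(
        DeviceTypeEnum.P4_SWITCH, ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE
    ) == ServiceTypeEnum.SERVICETYPE_L2NM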
diff --git a/src/pathcomp/frontend/tests/Objects_A_B_C.py b/src/pathcomp/frontend/tests/Objects_A_B_C.py
index f26d74ce4c665663735bae69dcfb5a4e14311bfa..5290123b62251a58d8e0a7f273ea23c38ee2cc8a 100644
--- a/src/pathcomp/frontend/tests/Objects_A_B_C.py
+++ b/src/pathcomp/frontend/tests/Objects_A_B_C.py
@@ -80,21 +80,36 @@ DEVICE_C3_ID, DEVICE_C3_ENDPOINTS, DEVICE_C3 = compose_device('C3', ['1', '2', '
 LINK_A2_C3_ID, LINK_A2_C3 = compose_link(DEVICE_A2_ENDPOINTS[2], DEVICE_C3_ENDPOINTS[2])
 LINK_C1_B2_ID, LINK_C1_B2 = compose_link(DEVICE_C1_ENDPOINTS[2], DEVICE_B2_ENDPOINTS[2])
 
+LINK_C3_A2_ID, LINK_C3_A2 = compose_link(DEVICE_C3_ENDPOINTS[2], DEVICE_A2_ENDPOINTS[2])
+LINK_B2_C1_ID, LINK_B2_C1 = compose_link(DEVICE_B2_ENDPOINTS[2], DEVICE_C1_ENDPOINTS[2])
+
 # ----- IntraDomain A Links --------------------------------------------------------------------------------------------
 LINK_A1_A2_ID, LINK_A1_A2 = compose_link(DEVICE_A1_ENDPOINTS[0], DEVICE_A2_ENDPOINTS[0])
 LINK_A1_A3_ID, LINK_A1_A3 = compose_link(DEVICE_A1_ENDPOINTS[1], DEVICE_A3_ENDPOINTS[0])
 LINK_A2_A3_ID, LINK_A2_A3 = compose_link(DEVICE_A2_ENDPOINTS[1], DEVICE_A3_ENDPOINTS[1])
 
+LINK_A2_A1_ID, LINK_A2_A1 = compose_link(DEVICE_A2_ENDPOINTS[0], DEVICE_A1_ENDPOINTS[0])
+LINK_A3_A1_ID, LINK_A3_A1 = compose_link(DEVICE_A3_ENDPOINTS[0], DEVICE_A1_ENDPOINTS[1])
+LINK_A3_A2_ID, LINK_A3_A2 = compose_link(DEVICE_A3_ENDPOINTS[1], DEVICE_A2_ENDPOINTS[1])
+
 # ----- IntraDomain B Links --------------------------------------------------------------------------------------------
 LINK_B1_B2_ID, LINK_B1_B2 = compose_link(DEVICE_B1_ENDPOINTS[0], DEVICE_B2_ENDPOINTS[0])
 LINK_B1_B3_ID, LINK_B1_B3 = compose_link(DEVICE_B1_ENDPOINTS[1], DEVICE_B3_ENDPOINTS[0])
 LINK_B2_B3_ID, LINK_B2_B3 = compose_link(DEVICE_B2_ENDPOINTS[1], DEVICE_B3_ENDPOINTS[1])
 
+LINK_B2_B1_ID, LINK_B2_B1 = compose_link(DEVICE_B2_ENDPOINTS[0], DEVICE_B1_ENDPOINTS[0])
+LINK_B3_B1_ID, LINK_B3_B1 = compose_link(DEVICE_B3_ENDPOINTS[0], DEVICE_B1_ENDPOINTS[1])
+LINK_B3_B2_ID, LINK_B3_B2 = compose_link(DEVICE_B3_ENDPOINTS[1], DEVICE_B2_ENDPOINTS[1])
+
 # ----- IntraDomain C Links --------------------------------------------------------------------------------------------
 LINK_C1_C2_ID, LINK_C1_C2 = compose_link(DEVICE_C1_ENDPOINTS[0], DEVICE_C2_ENDPOINTS[0])
 LINK_C1_C3_ID, LINK_C1_C3 = compose_link(DEVICE_C1_ENDPOINTS[1], DEVICE_C3_ENDPOINTS[0])
 LINK_C2_C3_ID, LINK_C2_C3 = compose_link(DEVICE_C2_ENDPOINTS[1], DEVICE_C3_ENDPOINTS[1])
 
+LINK_C2_C1_ID, LINK_C2_C1 = compose_link(DEVICE_C2_ENDPOINTS[0], DEVICE_C1_ENDPOINTS[0])
+LINK_C3_C1_ID, LINK_C3_C1 = compose_link(DEVICE_C3_ENDPOINTS[0], DEVICE_C1_ENDPOINTS[1])
+LINK_C3_C2_ID, LINK_C3_C2 = compose_link(DEVICE_C3_ENDPOINTS[1], DEVICE_C2_ENDPOINTS[1])
+
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_A1_B1 = compose_service(DEVICE_A1_ENDPOINTS[2], DEVICE_B1_ENDPOINTS[2], constraints=[
     json_constraint_sla_capacity(10.0),
@@ -108,31 +123,38 @@ DEVICES    = [  DEVICE_A1, DEVICE_A2, DEVICE_A3,
                 DEVICE_B1, DEVICE_B2, DEVICE_B3,
                 DEVICE_C1, DEVICE_C2, DEVICE_C3,    ]
 LINKS      = [  LINK_A2_C3, LINK_C1_B2,
+                LINK_C3_A2, LINK_B2_C1,
+
                 LINK_A1_A2, LINK_A1_A3, LINK_A2_A3,
+                LINK_A2_A1, LINK_A3_A1, LINK_A3_A2,
+
                 LINK_B1_B2, LINK_B1_B3, LINK_B2_B3,
-                LINK_C1_C2, LINK_C1_C3, LINK_C2_C3, ]
+                LINK_B2_B1, LINK_B3_B1, LINK_B3_B2,
+
+                LINK_C1_C2, LINK_C1_C3, LINK_C2_C3,
+                LINK_C2_C1, LINK_C3_C1, LINK_C3_C2, ]
 SERVICES   = [  SERVICE_A1_B1]
 
-OBJECTS_PER_TOPOLOGY = [
-    (TOPOLOGY_ADMIN_ID,
-        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,
-            DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,
-            DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
-        [   LINK_A2_C3_ID, LINK_C1_B2_ID,
-            LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,
-            LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,
-            LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
-    ),
-    (TOPOLOGY_A_ID,
-        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,       ],
-        [   LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,    ],
-    ),
-    (TOPOLOGY_B_ID,
-        [   DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,       ],
-        [   LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,    ],
-    ),
-    (TOPOLOGY_C_ID,
-        [   DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
-        [   LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
-    ),
-]
+#OBJECTS_PER_TOPOLOGY = [
+#    (TOPOLOGY_ADMIN_ID,
+#        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,
+#            DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,
+#            DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
+#        [   LINK_A2_C3_ID, LINK_C1_B2_ID,
+#            LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,
+#            LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,
+#            LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
+#    ),
+#    (TOPOLOGY_A_ID,
+#        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,       ],
+#        [   LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,    ],
+#    ),
+#    (TOPOLOGY_B_ID,
+#        [   DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,       ],
+#        [   LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,    ],
+#    ),
+#    (TOPOLOGY_C_ID,
+#        [   DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
+#        [   LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
+#    ),
+#]
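
All intra- and inter-domain links are now declared in both directions, making the test graph explicitly bidirectional. A hypothetical helper (not part of this change, and relying on this module's compose_link) could cut the duplication:

    def compose_bidirectional_link(endpoint_a, endpoint_z):
        # Returns ((LINK_AZ_ID, LINK_AZ), (LINK_ZA_ID, LINK_ZA)).
        return compose_link(endpoint_a, endpoint_z), compose_link(endpoint_z, endpoint_a)

    # Example (hypothetical):
    # (LINK_A1_A2_ID, LINK_A1_A2), (LINK_A2_A1_ID, LINK_A2_A1) = \
    #     compose_bidirectional_link(DEVICE_A1_ENDPOINTS[0], DEVICE_A2_ENDPOINTS[0])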
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
index 9ee784e1f76026416bca9824aa8e54e2c4f874f2..053dfd4c45e3822914745905c71f9b64300e1a2f 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
@@ -118,6 +118,11 @@ LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1
 LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
 LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
 
+LINK_CS1GW1_DC1GW_ID, LINK_CS1GW1_DC1GW = compose_link(DEV_CS1GW1_EPS[0], DEV_DC1GW_EPS[0])
+LINK_CS1GW2_DC1GW_ID, LINK_CS1GW2_DC1GW = compose_link(DEV_CS1GW2_EPS[0], DEV_DC1GW_EPS[1])
+LINK_CS2GW1_DC2GW_ID, LINK_CS2GW1_DC2GW = compose_link(DEV_CS2GW1_EPS[0], DEV_DC2GW_EPS[0])
+LINK_CS2GW2_DC2GW_ID, LINK_CS2GW2_DC2GW = compose_link(DEV_CS2GW2_EPS[0], DEV_DC2GW_EPS[1])
+
 # InterDomain CSGW-TN
 LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0])
 LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0])
@@ -128,6 +133,15 @@ LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4
 LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1])
 LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1])
 
+LINK_TNR1_CS1GW1_ID, LINK_TNR1_CS1GW1 = compose_link(DEV_TNR1_EPS[0], DEV_CS1GW1_EPS[1])
+LINK_TNR2_CS1GW2_ID, LINK_TNR2_CS1GW2 = compose_link(DEV_TNR2_EPS[0], DEV_CS1GW2_EPS[1])
+LINK_TNR2_CS1GW1_ID, LINK_TNR2_CS1GW1 = compose_link(DEV_TNR2_EPS[1], DEV_CS1GW1_EPS[2])
+LINK_TNR1_CS1GW2_ID, LINK_TNR1_CS1GW2 = compose_link(DEV_TNR1_EPS[1], DEV_CS1GW2_EPS[2])
+LINK_TNR3_CS2GW1_ID, LINK_TNR3_CS2GW1 = compose_link(DEV_TNR3_EPS[0], DEV_CS2GW1_EPS[1])
+LINK_TNR4_CS2GW2_ID, LINK_TNR4_CS2GW2 = compose_link(DEV_TNR4_EPS[0], DEV_CS2GW2_EPS[1])
+LINK_TNR4_CS2GW1_ID, LINK_TNR4_CS2GW1 = compose_link(DEV_TNR4_EPS[1], DEV_CS2GW1_EPS[2])
+LINK_TNR3_CS2GW2_ID, LINK_TNR3_CS2GW2 = compose_link(DEV_TNR3_EPS[1], DEV_CS2GW2_EPS[2])
+
 # IntraDomain TN
 LINK_TNR1_TNR2_ID, LINK_TNR1_TNR2 = compose_link(DEV_TNR1_EPS[2], DEV_TNR2_EPS[3])
 LINK_TNR2_TNR3_ID, LINK_TNR2_TNR3 = compose_link(DEV_TNR2_EPS[2], DEV_TNR3_EPS[3])
@@ -136,6 +150,13 @@ LINK_TNR4_TNR1_ID, LINK_TNR4_TNR1 = compose_link(DEV_TNR4_EPS[2], DEV_TNR1_EPS[3
 LINK_TNR1_TNR3_ID, LINK_TNR1_TNR3 = compose_link(DEV_TNR1_EPS[4], DEV_TNR3_EPS[4])
 LINK_TNR2_TNR4_ID, LINK_TNR2_TNR4 = compose_link(DEV_TNR2_EPS[4], DEV_TNR4_EPS[4])
 
+LINK_TNR2_TNR1_ID, LINK_TNR2_TNR1 = compose_link(DEV_TNR2_EPS[3], DEV_TNR1_EPS[2])
+LINK_TNR3_TNR2_ID, LINK_TNR3_TNR2 = compose_link(DEV_TNR3_EPS[3], DEV_TNR2_EPS[2])
+LINK_TNR4_TNR3_ID, LINK_TNR4_TNR3 = compose_link(DEV_TNR4_EPS[3], DEV_TNR3_EPS[2])
+LINK_TNR1_TNR4_ID, LINK_TNR1_TNR4 = compose_link(DEV_TNR1_EPS[3], DEV_TNR4_EPS[2])
+LINK_TNR3_TNR1_ID, LINK_TNR3_TNR1 = compose_link(DEV_TNR3_EPS[4], DEV_TNR1_EPS[4])
+LINK_TNR4_TNR2_ID, LINK_TNR4_TNR2 = compose_link(DEV_TNR4_EPS[4], DEV_TNR2_EPS[4])
+
 
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
@@ -151,41 +172,44 @@ DEVICES    = [  DEV_DC1GW, DEV_DC2GW,
                 DEV_TNR1, DEV_TNR2, DEV_TNR3, DEV_TNR4,
             ]
 LINKS      = [  LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
+                LINK_CS1GW1_DC1GW, LINK_CS1GW2_DC1GW, LINK_CS2GW1_DC2GW, LINK_CS2GW2_DC2GW,
+
                 LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1,
                 LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3,
                 LINK_TNR1_TNR2, LINK_TNR2_TNR3, LINK_TNR3_TNR4, LINK_TNR4_TNR1, LINK_TNR1_TNR3, LINK_TNR2_TNR4,
+                LINK_TNR2_TNR1, LINK_TNR3_TNR2, LINK_TNR4_TNR3, LINK_TNR1_TNR4, LINK_TNR3_TNR1, LINK_TNR4_TNR2,
             ]
 SERVICES   = [  SERVICE_DC1GW_DC2GW   ]
 
-OBJECTS_PER_TOPOLOGY = [
-    (TOPO_ADMIN_ID,
-        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
-            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
-            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-        ],
-        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
-            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
-            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
-            LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
-            LINK_TNR2_TNR4_ID,
-        ],
-    ),
-    (TOPO_DC1_ID,
-        [DEV_DC1GW_ID],
-        []),
-    (TOPO_DC2_ID,
-        [DEV_DC2GW_ID],
-        []),
-    (TOPO_CS1_ID,
-        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
-        []),
-    (TOPO_CS2_ID,
-        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
-        []),
-    (TOPO_TN_ID,
-        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-        ],
-        [   LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
-            LINK_TNR2_TNR4_ID,
-        ]),
-]
+#OBJECTS_PER_TOPOLOGY = [
+#    (TOPO_ADMIN_ID,
+#        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
+#            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
+#            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#        ],
+#        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
+#            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
+#            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
+#            LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
+#            LINK_TNR2_TNR4_ID,
+#        ],
+#    ),
+#    (TOPO_DC1_ID,
+#        [DEV_DC1GW_ID],
+#        []),
+#    (TOPO_DC2_ID,
+#        [DEV_DC2GW_ID],
+#        []),
+#    (TOPO_CS1_ID,
+#        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
+#        []),
+#    (TOPO_CS2_ID,
+#        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
+#        []),
+#    (TOPO_TN_ID,
+#        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#        ],
+#        [   LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
+#            LINK_TNR2_TNR4_ID,
+#        ]),
+#]
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
index 71510d088746bd791e4671686dd5114874dd5a2a..2c8428568c001a53cbf2c08aa13b61ad14a1bd51 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
@@ -130,6 +130,11 @@ LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1
 LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
 LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
 
+LINK_CS1GW1_DC1GW_ID, LINK_CS1GW1_DC1GW = compose_link(DEV_CS1GW1_EPS[0], DEV_DC1GW_EPS[0])
+LINK_CS1GW2_DC1GW_ID, LINK_CS1GW2_DC1GW = compose_link(DEV_CS1GW2_EPS[0], DEV_DC1GW_EPS[1])
+LINK_CS2GW1_DC2GW_ID, LINK_CS2GW1_DC2GW = compose_link(DEV_CS2GW1_EPS[0], DEV_DC2GW_EPS[0])
+LINK_CS2GW2_DC2GW_ID, LINK_CS2GW2_DC2GW = compose_link(DEV_CS2GW2_EPS[0], DEV_DC2GW_EPS[1])
+
 # InterDomain CSGW-TN
 LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0])
 LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0])
@@ -140,12 +145,26 @@ LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4
 LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1])
 LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1])
 
+LINK_TNR1_CS1GW1_ID, LINK_TNR1_CS1GW1 = compose_link(DEV_TNR1_EPS[0], DEV_CS1GW1_EPS[1])
+LINK_TNR2_CS1GW2_ID, LINK_TNR2_CS1GW2 = compose_link(DEV_TNR2_EPS[0], DEV_CS1GW2_EPS[1])
+LINK_TNR2_CS1GW1_ID, LINK_TNR2_CS1GW1 = compose_link(DEV_TNR2_EPS[1], DEV_CS1GW1_EPS[2])
+LINK_TNR1_CS1GW2_ID, LINK_TNR1_CS1GW2 = compose_link(DEV_TNR1_EPS[1], DEV_CS1GW2_EPS[2])
+LINK_TNR3_CS2GW1_ID, LINK_TNR3_CS2GW1 = compose_link(DEV_TNR3_EPS[0], DEV_CS2GW1_EPS[1])
+LINK_TNR4_CS2GW2_ID, LINK_TNR4_CS2GW2 = compose_link(DEV_TNR4_EPS[0], DEV_CS2GW2_EPS[1])
+LINK_TNR4_CS2GW1_ID, LINK_TNR4_CS2GW1 = compose_link(DEV_TNR4_EPS[1], DEV_CS2GW1_EPS[2])
+LINK_TNR3_CS2GW2_ID, LINK_TNR3_CS2GW2 = compose_link(DEV_TNR3_EPS[1], DEV_CS2GW2_EPS[2])
+
 # IntraDomain TN
 LINK_TNR1_TOLS_ID, LINK_TNR1_TOLS = compose_link(DEV_TNR1_EPS[2], DEV_TOLS_EPS[0])
 LINK_TNR2_TOLS_ID, LINK_TNR2_TOLS = compose_link(DEV_TNR2_EPS[2], DEV_TOLS_EPS[1])
 LINK_TNR3_TOLS_ID, LINK_TNR3_TOLS = compose_link(DEV_TNR3_EPS[2], DEV_TOLS_EPS[2])
 LINK_TNR4_TOLS_ID, LINK_TNR4_TOLS = compose_link(DEV_TNR4_EPS[2], DEV_TOLS_EPS[3])
 
+LINK_TOLS_TNR1_ID, LINK_TOLS_TNR1 = compose_link(DEV_TOLS_EPS[0], DEV_TNR1_EPS[2])
+LINK_TOLS_TNR2_ID, LINK_TOLS_TNR2 = compose_link(DEV_TOLS_EPS[1], DEV_TNR2_EPS[2])
+LINK_TOLS_TNR3_ID, LINK_TOLS_TNR3 = compose_link(DEV_TOLS_EPS[2], DEV_TNR3_EPS[2])
+LINK_TOLS_TNR4_ID, LINK_TOLS_TNR4 = compose_link(DEV_TOLS_EPS[3], DEV_TNR4_EPS[2])
+
 
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
@@ -162,41 +181,47 @@ DEVICES    = [  DEV_DC1GW, DEV_DC2GW,
                 DEV_TOLS,
             ]
 LINKS      = [  LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
+                LINK_CS1GW1_DC1GW, LINK_CS1GW2_DC1GW, LINK_CS2GW1_DC2GW, LINK_CS2GW2_DC2GW,
+
                 LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1,
                 LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3,
+                LINK_TNR1_CS1GW1, LINK_TNR2_CS1GW2, LINK_TNR2_CS1GW1, LINK_TNR1_CS1GW2,
+                LINK_TNR3_CS2GW1, LINK_TNR4_CS2GW2, LINK_TNR4_CS2GW1, LINK_TNR3_CS2GW2,
+
                 LINK_TNR1_TOLS, LINK_TNR2_TOLS, LINK_TNR3_TOLS, LINK_TNR4_TOLS,
+                LINK_TOLS_TNR1, LINK_TOLS_TNR2, LINK_TOLS_TNR3, LINK_TOLS_TNR4,
             ]
 SERVICES   = [  SERVICE_DC1GW_DC2GW   ]
 
-OBJECTS_PER_TOPOLOGY = [
-    (TOPO_ADMIN_ID,
-        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
-            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
-            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-            DEV_TOLS_ID,
-        ],
-        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
-            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
-            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
-            LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
-        ],
-    ),
-    (TOPO_DC1_ID,
-        [DEV_DC1GW_ID],
-        []),
-    (TOPO_DC2_ID,
-        [DEV_DC2GW_ID],
-        []),
-    (TOPO_CS1_ID,
-        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
-        []),
-    (TOPO_CS2_ID,
-        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
-        []),
-    (TOPO_TN_ID,
-        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-            DEV_TOLS_ID,
-        ],
-        [   LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
-        ]),
-]
+#OBJECTS_PER_TOPOLOGY = [
+#    (TOPO_ADMIN_ID,
+#        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
+#            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
+#            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#            DEV_TOLS_ID,
+#        ],
+#        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
+#            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
+#            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
+#            LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
+#        ],
+#    ),
+#    (TOPO_DC1_ID,
+#        [DEV_DC1GW_ID],
+#        []),
+#    (TOPO_DC2_ID,
+#        [DEV_DC2GW_ID],
+#        []),
+#    (TOPO_CS1_ID,
+#        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
+#        []),
+#    (TOPO_CS2_ID,
+#        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
+#        []),
+#    (TOPO_TN_ID,
+#        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#            DEV_TOLS_ID,
+#        ],
+#        [   LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
+#        ]),
+#]
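
Since all three test scenarios now declare reverse links explicitly, a symmetry check helps catch a missing twin. This sketch assumes the descriptor links follow the proto JSON shape with a 'link_endpoint_ids' list; adjust the key names if compose_link emits a different structure:

    def _endpoint_key(endpoint_id):
        return (endpoint_id['device_id']['device_uuid']['uuid'],
                endpoint_id['endpoint_uuid']['uuid'])

    def _link_pair(link):
        endpoint_a, endpoint_z = link['link_endpoint_ids']
        return _endpoint_key(endpoint_a), _endpoint_key(endpoint_z)

    pairs = {_link_pair(link) for link in LINKS}
    missing = {(z, a) for (a, z) in pairs} - pairs
    assert not missing, 'links missing a reverse direction: {:s}'.format(str(missing))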
diff --git a/src/pathcomp/frontend/tests/test_pathcomp/__init__.py b/src/pathcomp/frontend/tests/test_pathcomp/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/pathcomp/frontend/tests/test_pathcomp/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/pathcomp/frontend/tests/test_pathcomp/__main__.py b/src/pathcomp/frontend/tests/test_pathcomp/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba1cc4a2c1838248882b983c1ed25c27b395aa9a
--- /dev/null
+++ b/src/pathcomp/frontend/tests/test_pathcomp/__main__.py
@@ -0,0 +1,33 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging, sys
+from common.proto.context_pb2 import ServiceTypeEnum
+from pathcomp.frontend.service.algorithms.tools.ComputeSubServices import convert_explicit_path_hops_to_connections
+from .data import path_hops, device_dict
+
+logging.basicConfig(level=logging.DEBUG)
+LOGGER = logging.getLogger(__name__)
+
+def main():
+    service_uuid = 'dc-2-dc-svc'
+    service_type = ServiceTypeEnum.SERVICETYPE_L2NM
+    connections = convert_explicit_path_hops_to_connections(path_hops, device_dict, service_uuid, service_type)
+    str_connections = '\n'.join(['  ' + str(connection) for connection in connections])
+    LOGGER.debug('connections = [\n{:s}\n]'.format(str_connections))
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
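
With the package __init__.py above, this harness runs standalone as `python -m pathcomp.frontend.tests.test_pathcomp` (assuming 'src/' is on PYTHONPATH). It only exercises convert_explicit_path_hops_to_connections against the local sample data, so no running TFS deployment appears to be required.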
diff --git a/src/pathcomp/frontend/tests/test_pathcomp/data.py b/src/pathcomp/frontend/tests/test_pathcomp/data.py
new file mode 100644
index 0000000000000000000000000000000000000000..aeac5e38a222fb2dfc3f7ae98b2737b47f855ee4
--- /dev/null
+++ b/src/pathcomp/frontend/tests/test_pathcomp/data.py
@@ -0,0 +1,70 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+from typing import Dict, Tuple
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import ConfigActionEnum, Device
+
+path_hops = [
+    {'device': 'DC1',      'ingress_ep': 'int',                  'egress_ep': 'eth1'                 },
+    {'device': 'PE1',      'ingress_ep': '1/1',                  'egress_ep': '1/2'                  },
+    {'device': 'MW1-2',    'ingress_ep': '172.18.0.1:1',         'egress_ep': '172.18.0.2:1'         },
+    {'device': 'HUB1',     'ingress_ep': '1/1',                  'egress_ep': 'XR-T1'                },
+    {'device': 'splitter', 'ingress_ep': 'common',               'egress_ep': 'leaf1'                },
+    {'device': 'OLS',      'ingress_ep': 'node_1_port_13-input', 'egress_ep': 'node_4_port_13-output'},
+    {'device': 'LEAF2',    'ingress_ep': 'XR-T1',                'egress_ep': '1/1'                  },
+    {'device': 'PE4',      'ingress_ep': '1/1',                  'egress_ep': '1/2'                  },
+    {'device': 'DC2',      'ingress_ep': 'eth2',                 'egress_ep': 'int'                  }
+]
+
+device_data = {
+    'TFS'     : {'controller_uuid': None,  'device_type': DeviceTypeEnum.TERAFLOWSDN_CONTROLLER   },
+    'IPM'     : {'controller_uuid': None,  'device_type': DeviceTypeEnum.XR_CONSTELLATION         },
+    'OLS'     : {'controller_uuid': None,  'device_type': DeviceTypeEnum.OPEN_LINE_SYSTEM         },
+    'MW1-2'   : {'controller_uuid': None,  'device_type': DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM   },
+    'MW3-4'   : {'controller_uuid': None,  'device_type': DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM   },
+
+    'DC1'     : {'controller_uuid': None,  'device_type': DeviceTypeEnum.EMULATED_DATACENTER      },
+    'DC2'     : {'controller_uuid': None,  'device_type': DeviceTypeEnum.EMULATED_DATACENTER      },
+
+    'PE1'     : {'controller_uuid': 'TFS', 'device_type': DeviceTypeEnum.PACKET_ROUTER            },
+    'PE2'     : {'controller_uuid': 'TFS', 'device_type': DeviceTypeEnum.PACKET_ROUTER            },
+    'PE3'     : {'controller_uuid': 'TFS', 'device_type': DeviceTypeEnum.PACKET_ROUTER            },
+    'PE4'     : {'controller_uuid': 'TFS', 'device_type': DeviceTypeEnum.PACKET_ROUTER            },
+
+    'HUB1'    : {'controller_uuid': 'IPM', 'device_type': DeviceTypeEnum.PACKET_ROUTER            },
+    'LEAF1'   : {'controller_uuid': 'IPM', 'device_type': DeviceTypeEnum.PACKET_ROUTER            },
+    'LEAF2'   : {'controller_uuid': 'IPM', 'device_type': DeviceTypeEnum.PACKET_ROUTER            },
+
+    'splitter': {'controller_uuid': None,  'device_type': DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER},
+}
+
+def process_device(device_uuid, json_device) -> Tuple[Dict, Device]:
+    grpc_device = Device()
+    grpc_device.device_id.device_uuid.uuid = device_uuid            # pylint: disable=no-member
+    grpc_device.device_type = json_device['device_type'].value
+    controller_uuid = json_device.get('controller_uuid')
+    if controller_uuid is not None:
+        config_rule = grpc_device.device_config.config_rules.add()  # pylint: disable=no-member
+        config_rule.action = ConfigActionEnum.CONFIGACTION_SET
+        config_rule.custom.resource_key = '_controller'
+        config_rule.custom.resource_value = json.dumps({'uuid': controller_uuid})
+    return json_device, grpc_device
+
+device_dict = {
+    device_uuid: process_device(device_uuid, json_device)
+    for device_uuid,json_device in device_data.items()
+}
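
A round-trip sketch tying this data to the '_controller' convention: the custom config rule written by process_device() is what the pathcomp helper get_device_controller_uuid() parses back out.

    import json
    from pathcomp.frontend.tests.test_pathcomp.data import device_dict

    # Grounded in the table above: PE1 is modelled as controlled by 'TFS'.
    _, pe1_grpc = device_dict['PE1']
    rule = pe1_grpc.device_config.config_rules[0]
    assert rule.custom.resource_key == '_controller'
    assert json.loads(rule.custom.resource_value) == {'uuid': 'TFS'}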
diff --git a/src/pathcomp/frontend/tests/test_unitary.py b/src/pathcomp/frontend/tests/test_unitary.py
index 8088259b80b8ade2669568b74f004dcfa631dd9c..f4e3cbf0f60285b960625a677854c4b7ab4decb9 100644
--- a/src/pathcomp/frontend/tests/test_unitary.py
+++ b/src/pathcomp/frontend/tests/test_unitary.py
@@ -13,12 +13,15 @@
 # limitations under the License.
 
 import copy, logging, os
-from common.proto.context_pb2 import Context, ContextId, DeviceId, Link, LinkId, Topology, Device, TopologyId
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId
 from common.proto.pathcomp_pb2 import PathCompRequest
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
 from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.Constraint import (
     json_constraint_custom, json_constraint_endpoint_location_region, json_constraint_endpoint_priority,
     json_constraint_sla_availability, json_constraint_sla_capacity, json_constraint_sla_latency)
+from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.EndPoint import json_endpoint_id
 from common.tools.object_factory.Service import json_service_l3nm_planned
@@ -26,9 +29,9 @@ from context.client.ContextClient import ContextClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient
 
 # Scenarios:
-#from .Objects_A_B_C import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
-#from .Objects_DC_CSGW_TN import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
-from .Objects_DC_CSGW_TN_OLS import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
+#from .Objects_A_B_C import CONTEXTS, DEVICES, LINKS, SERVICES, TOPOLOGIES
+#from .Objects_DC_CSGW_TN import CONTEXTS, DEVICES, LINKS, SERVICES, TOPOLOGIES
+from .Objects_DC_CSGW_TN_OLS import CONTEXTS, DEVICES, LINKS, SERVICES, TOPOLOGIES
 
 # configure backend environment variables before overwriting them with fixtures to use real backend pathcomp
 DEFAULT_PATHCOMP_BACKEND_SCHEME  = 'http'
@@ -58,31 +61,29 @@ from .PrepareTestScenario import ( # pylint: disable=unused-import
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-def test_prepare_environment(
-    context_client : ContextClient):    # pylint: disable=redefined-outer-name
-
-    for context  in CONTEXTS  : context_client.SetContext (Context (**context ))
-    for topology in TOPOLOGIES: context_client.SetTopology(Topology(**topology))
-    for device   in DEVICES   : context_client.SetDevice  (Device  (**device  ))
-    for link     in LINKS     : context_client.SetLink    (Link    (**link    ))
-
-    for topology_id, device_ids, link_ids in OBJECTS_PER_TOPOLOGY:
-        topology = Topology()
-        topology.CopyFrom(context_client.GetTopology(TopologyId(**topology_id)))
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+DESCRIPTORS = {
+    'dummy_mode': True,
+    'contexts'  : CONTEXTS,
+    'topologies': TOPOLOGIES,
+    'devices'   : DEVICES,
+    'links'     : LINKS,
+}
 
-        device_ids_in_topology = {device_id.device_uuid.uuid for device_id in topology.device_ids}
-        func_device_id_not_added = lambda device_id: device_id['device_uuid']['uuid'] not in device_ids_in_topology
-        func_device_id_json_to_grpc = lambda device_id: DeviceId(**device_id)
-        device_ids_to_add = list(map(func_device_id_json_to_grpc, filter(func_device_id_not_added, device_ids)))
-        topology.device_ids.extend(device_ids_to_add)
+def test_prepare_environment(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+) -> None:
+    validate_empty_scenario(context_client)
 
-        link_ids_in_topology = {link_id.link_uuid.uuid for link_id in topology.link_ids}
-        func_link_id_not_added = lambda link_id: link_id['link_uuid']['uuid'] not in link_ids_in_topology
-        func_link_id_json_to_grpc = lambda link_id: LinkId(**link_id)
-        link_ids_to_add = list(map(func_link_id_json_to_grpc, filter(func_link_id_not_added, link_ids)))
-        topology.link_ids.extend(link_ids_to_add)
+    descriptor_loader = DescriptorLoader(descriptors=DESCRIPTORS, context_client=context_client)
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+    descriptor_loader.validate()
 
-        context_client.SetTopology(topology)
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
 
 def test_request_service_shortestpath(
     pathcomp_client : PathCompClient):  # pylint: disable=redefined-outer-name
@@ -266,9 +267,15 @@ def test_request_service_kdisjointpath(
 
 
 def test_cleanup_environment(
-    context_client : ContextClient):    # pylint: disable=redefined-outer-name
-
-    for link     in LINKS     : context_client.RemoveLink    (LinkId    (**link    ['link_id'    ]))
-    for device   in DEVICES   : context_client.RemoveDevice  (DeviceId  (**device  ['device_id'  ]))
-    for topology in TOPOLOGIES: context_client.RemoveTopology(TopologyId(**topology['topology_id']))
-    for context  in CONTEXTS  : context_client.RemoveContext (ContextId (**context ['context_id' ]))
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+) -> None:
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    # Validate the scenario is still the base one, then unload it
+    descriptor_loader = DescriptorLoader(descriptors=DESCRIPTORS, context_client=context_client)
+    descriptor_loader.validate()
+    descriptor_loader.unload()
+    validate_empty_scenario(context_client)
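
Note: 'dummy_mode': True appears to make the DescriptorLoader write the objects straight into Context, bypassing the Device/Service add workflows, which is all these pathcomp-only tests need. The validate()/unload() pair in cleanup then returns Context to the empty state asserted by validate_empty_scenario().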
diff --git a/src/policy/pom.xml b/src/policy/pom.xml
index 6ea28421abedf6916e998b6cfdebe23c34908c4a..267006311f82c11bce4db29f2d114f30c1832f88 100644
--- a/src/policy/pom.xml
+++ b/src/policy/pom.xml
@@ -179,6 +179,11 @@
             <scope>test</scope>
         </dependency>
 
+        <dependency>
+            <groupId>io.quarkus</groupId>
+            <artifactId>quarkus-smallrye-metrics</artifactId>
+        </dependency>
+
     </dependencies>
 
     <build>
diff --git a/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java b/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java
index c10e5dc8b91ee9dcc2ae8aa74526faeb4e4bfcec..30e888d9fab1aae535dca345c7c56e28218bd2c2 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java
@@ -20,6 +20,9 @@ import context.ContextOuterClass.ServiceId;
 import io.quarkus.grpc.GrpcService;
 import io.smallrye.mutiny.Uni;
 import javax.inject.Inject;
+import org.eclipse.microprofile.metrics.MetricUnits;
+import org.eclipse.microprofile.metrics.annotation.Counted;
+import org.eclipse.microprofile.metrics.annotation.Timed;
 import policy.Policy;
 import policy.Policy.PolicyRuleBasic;
 import policy.Policy.PolicyRuleDevice;
@@ -41,6 +44,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyAddService_counter")
+    @Timed(name = "policy_policyAddService_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyAddService(PolicyRuleService request) {
         final var policyRuleService = serializer.deserialize(request);
 
@@ -51,6 +56,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyUpdateService_counter")
+    @Timed(name = "policy_policyUpdateService_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyUpdateService(PolicyRuleService request) {
         final var policyRuleService = serializer.deserialize(request);
 
@@ -61,6 +68,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyAddDevice_counter")
+    @Timed(name = "policy_policyAddDevice_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyAddDevice(PolicyRuleDevice request) {
         final var policyRuleDevice = serializer.deserialize(request);
 
@@ -71,6 +80,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyUpdateDevice_counter")
+    @Timed(name = "policy_policyUpdateDevice_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyUpdateDevice(PolicyRuleDevice request) {
         final var policyRuleDevice = serializer.deserialize(request);
 
@@ -81,6 +92,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyDelete_counter")
+    @Timed(name = "policy_policyDelete_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyDelete(PolicyRuleId request) {
         final var policyRuleId = serializer.deserialize(request);
 
@@ -88,6 +101,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_getPolicyService_counter")
+    @Timed(name = "policy_getPolicyService_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleService> getPolicyService(PolicyRuleId request) {
         final var policyRuleBasic = PolicyRuleBasic.newBuilder().setPolicyRuleId(request).build();
 
@@ -96,6 +111,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_getPolicyDevice_counter")
+    @Timed(name = "policy_getPolicyDevice_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleDevice> getPolicyDevice(PolicyRuleId request) {
         final var policyRuleBasic = PolicyRuleBasic.newBuilder().setPolicyRuleId(request).build();
 
@@ -104,6 +121,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_getPolicyByServiceId_counter")
+    @Timed(name = "policy_getPolicyByServiceId_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleServiceList> getPolicyByServiceId(ServiceId request) {
         return Uni.createFrom().item(() -> Policy.PolicyRuleServiceList.newBuilder().build());
     }
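
With quarkus-smallrye-metrics on the classpath, the @Counted/@Timed pairs above are published on the Quarkus metrics endpoint. A hedged smoke check from inside the cluster (the service name and port come from the kubernetes.yml in this patch; SmallRye prefixes application metrics, hence the substring match):

    import requests  # third-party package; not assumed to be in the TFS requirements

    text = requests.get('http://policyservice:9192/q/metrics', timeout=5).text
    assert 'policy_policyAddService_counter' in text
    assert 'policy_policyDelete_histogram' in text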
diff --git a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
index 529ec633426e41f3218857642aa6751ac574ab23..967d1d6e604e312fe9d8314beea023f902af776b 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
@@ -2245,6 +2245,8 @@ public class Serializer {
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352;
             case XR:
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR;
+            case IETF_L2VPN:
+                return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN;
             case UNDEFINED:
             default:
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_UNDEFINED;
@@ -2266,6 +2268,8 @@ public class Serializer {
                 return DeviceDriverEnum.ONF_TR_352;
             case DEVICEDRIVER_XR:
                 return DeviceDriverEnum.XR;
+            case DEVICEDRIVER_IETF_L2VPN:
+                return DeviceDriverEnum.IETF_L2VPN;
             case DEVICEDRIVER_UNDEFINED:
             case UNRECOGNIZED:
             default:
diff --git a/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java b/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
index daee299ddf64327c0d782e640cd1e924e139dccb..ad763e35dfeef71c2f9f73dbf51785a3e03c0e0d 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
@@ -23,5 +23,6 @@ public enum DeviceDriverEnum {
     P4,
     IETF_NETWORK_TOPOLOGY,
     ONF_TR_352,
-    XR
+    XR,
+    IETF_L2VPN
 }
diff --git a/src/policy/src/main/resources/application.yml b/src/policy/src/main/resources/application.yml
index e908f5e36265fb2c3050f1b7e4247847463fc385..38a222d7934751e9eac28854300d55bd631a669c 100644
--- a/src/policy/src/main/resources/application.yml
+++ b/src/policy/src/main/resources/application.yml
@@ -37,6 +37,7 @@ quarkus:
     group: tfs
     name: controller/policy
     registry: labs.etsi.org:5050
+    tag: 0.1.0
 
   kubernetes:
     name: policyservice
@@ -52,14 +53,18 @@ quarkus:
       period: 10s
     ports:
       http:
-        host-port: 8080
+        host-port: 9192
         container-port: 8080
-      grpc:
-        host-port: 6060
-        container-port: 6060
     env:
       vars:
         context-service-host: "contextservice"
         monitoring-service-host: "monitoringservice"
         service-service-host: "serviceservice"
+    resources:
+      requests:
+        cpu: 50m
+        memory: 512Mi
+      limits:
+        cpu: 500m
+        memory: 2048Mi
 
diff --git a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
index d284840b8dedb0320d49a5d5c6a1943c10d2afed..b0fb90864ce32bf6b793dded5d1f9de1dfba5097 100644
--- a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
+++ b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
@@ -3600,6 +3600,9 @@ class SerializerTest {
                         DeviceDriverEnum.ONF_TR_352,
                         ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352),
                 Arguments.of(DeviceDriverEnum.XR, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR),
+                Arguments.of(
+                        DeviceDriverEnum.IETF_L2VPN,
+                        ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN),
                 Arguments.of(
                         DeviceDriverEnum.UNDEFINED, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_UNDEFINED));
     }
diff --git a/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java b/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java
index fbbba62a2baa1c2fe2b3c3fe090883d6542996e4..53252341b30dc093c79d5a54baf98b82e6a24b75 100644
--- a/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java
+++ b/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java
@@ -177,6 +177,10 @@ public final class ContextOuterClass {
      * <code>DEVICEDRIVER_XR = 6;</code>
      */
     DEVICEDRIVER_XR(6),
+    /**
+     * <code>DEVICEDRIVER_IETF_L2VPN = 7;</code>
+     */
+    DEVICEDRIVER_IETF_L2VPN(7),
     UNRECOGNIZED(-1),
     ;
 
@@ -212,6 +216,10 @@ public final class ContextOuterClass {
      * <code>DEVICEDRIVER_XR = 6;</code>
      */
     public static final int DEVICEDRIVER_XR_VALUE = 6;
+    /**
+     * <code>DEVICEDRIVER_IETF_L2VPN = 7;</code>
+     */
+    public static final int DEVICEDRIVER_IETF_L2VPN_VALUE = 7;
 
 
     public final int getNumber() {
@@ -245,6 +253,7 @@ public final class ContextOuterClass {
         case 4: return DEVICEDRIVER_IETF_NETWORK_TOPOLOGY;
         case 5: return DEVICEDRIVER_ONF_TR_352;
         case 6: return DEVICEDRIVER_XR;
+        case 7: return DEVICEDRIVER_IETF_L2VPN;
         default: return null;
       }
     }
diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml
index 40516e5cc3fdd1fb993a1248ad36ea7551edfc40..f1079230f5e5efb75fb14d6cd6f3ad3fb5c9d2e3 100644
--- a/src/policy/target/kubernetes/kubernetes.yml
+++ b/src/policy/target/kubernetes/kubernetes.yml
@@ -4,21 +4,24 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 ---
 apiVersion: v1
 kind: Service
 metadata:
   annotations:
-    app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5
-    app.quarkus.io/build-timestamp: 2022-11-18 - 12:56:37 +0000
+    app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+    app.quarkus.io/build-timestamp: 2023-04-04 - 11:56:04 +0000
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /q/metrics
+    prometheus.io/port: "8080"
+    prometheus.io/scheme: http
   labels:
     app.kubernetes.io/name: policyservice
     app: policyservice
@@ -26,9 +29,9 @@ metadata:
 spec:
   ports:
     - name: http
-      port: 8080
+      port: 9192
       targetPort: 8080
-    - name: grpc
+    - name: grpc-server
       port: 6060
       targetPort: 6060
   selector:
@@ -39,8 +42,12 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   annotations:
-    app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5
-    app.quarkus.io/build-timestamp: 2022-11-22 - 14:10:01 +0000
+    app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+    app.quarkus.io/build-timestamp: 2023-04-04 - 11:56:04 +0000
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /q/metrics
+    prometheus.io/port: "8080"
+    prometheus.io/scheme: http
   labels:
     app: policyservice
     app.kubernetes.io/name: policyservice
@@ -53,8 +60,12 @@ spec:
   template:
     metadata:
       annotations:
-        app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5
-        app.quarkus.io/build-timestamp: 2022-11-22 - 14:10:01 +0000
+        app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+        app.quarkus.io/build-timestamp: 2023-04-04 - 11:56:04 +0000
+        prometheus.io/scrape: "true"
+        prometheus.io/path: /q/metrics
+        prometheus.io/port: "8080"
+        prometheus.io/scheme: http
       labels:
         app: policyservice
         app.kubernetes.io/name: policyservice
@@ -89,7 +100,7 @@ spec:
               name: http
               protocol: TCP
             - containerPort: 6060
-              name: grpc
+              name: grpc-server
               protocol: TCP
           readinessProbe:
             failureThreshold: 3
@@ -101,3 +112,10 @@ spec:
             periodSeconds: 10
             successThreshold: 1
             timeoutSeconds: 10
+          resources:
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+            requests:
+              cpu: 50m
+              memory: 512Mi
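
These prometheus.io/* annotations only take effect if the cluster's Prometheus performs annotation-based service discovery; they direct scrapers to container port 8080 and the /q/metrics path served by quarkus-smallrye-metrics.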
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index 0b2e0760161c109a2ba6a5feecc931e8bcf5c14f..6531376b84732b1ec80e335cfc6cd816be944b0a 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -19,12 +19,12 @@ from common.method_wrappers.ServiceExceptions import AlreadyExistsException, Inv
 from common.proto.context_pb2 import Empty, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum
 from common.proto.pathcomp_pb2 import PathCompRequest
 from common.proto.service_pb2_grpc import ServiceServiceServicer
+from common.tools.context_queries.Service import get_service_by_id
 from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient
 from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
 from .task_scheduler.TaskScheduler import TasksScheduler
-from .tools.ContextGetters import get_service
 
 LOGGER = logging.getLogger(__name__)
 
@@ -69,7 +69,9 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
 
         # check that service does not exist
         context_client = ContextClient()
-        current_service = get_service(context_client, request.service_id)
+        current_service = get_service_by_id(
+            context_client, request.service_id, rw_copy=False,
+            include_config_rules=False, include_constraints=False, include_endpoint_ids=False)
         if current_service is not None:
             context_uuid = request.service_id.context_id.context_uuid.uuid
             service_uuid = request.service_id.service_uuid.uuid
@@ -86,7 +88,9 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
         # Set service status to "SERVICESTATUS_PLANNED" to ensure rest of components are aware the service is
         # being modified.
         context_client = ContextClient()
-        _service : Optional[Service] = get_service(context_client, request.service_id)
+        _service : Optional[Service] = get_service_by_id(
+            context_client, request.service_id, rw_copy=False,
+            include_config_rules=False, include_constraints=False, include_endpoint_ids=False)
         service = Service()
         service.CopyFrom(request if _service is None else _service)
         if service.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN:                     # pylint: disable=no-member
@@ -106,7 +110,11 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
             service.service_config.config_rules.add().CopyFrom(config_rule) # pylint: disable=no-member
 
         service_id_with_uuids = context_client.SetService(service)
-        service_with_uuids = context_client.GetService(service_id_with_uuids)
+
+        # PathComp requires endpoints, constraints and config rules
+        service_with_uuids = get_service_by_id(
+            context_client, service_id_with_uuids, rw_copy=False,
+            include_config_rules=True, include_constraints=True, include_endpoint_ids=True)
 
         num_disjoint_paths = 0
         for constraint in request.service_constraints:
@@ -147,10 +155,8 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
 
         # Set service status to "SERVICESTATUS_PENDING_REMOVAL" to ensure rest of components are aware the service is
         # being modified.
-        _service : Optional[Service] = get_service(context_client, request)
-        if _service is None: raise Exception('Service({:s}) not found'.format(grpc_message_to_json_string(request)))
-        service = Service()
-        service.CopyFrom(_service)
+        service : Optional[Service] = get_service_by_id(context_client, request, rw_copy=True)
+        if service is None: raise Exception('Service({:s}) not found'.format(grpc_message_to_json_string(request)))
         # pylint: disable=no-member
         service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL
         context_client.SetService(service)
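
The include_* flags keep the two existence checks cheap (no config rules, constraints, or endpoint IDs are fetched), while the pre-PathComp lookup pulls the full object. In DeleteService, rw_copy=True appears to return a mutable copy, replacing the previous explicit Service()/CopyFrom() sequence.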
diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py
index a73ec53f37d68e0414eeb1df146373c6906273c5..3ec71dc64536e28457c4f1adbf3679186285786d 100644
--- a/src/service/service/service_handler_api/FilterFields.py
+++ b/src/service/service/service_handler_api/FilterFields.py
@@ -33,7 +33,8 @@ DEVICE_DRIVER_VALUES = {
     DeviceDriverEnum.DEVICEDRIVER_P4,
     DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY,
     DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352,
-    DeviceDriverEnum.DEVICEDRIVER_XR
+    DeviceDriverEnum.DEVICEDRIVER_XR,
+    DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN,
 }
 
 # Map allowed filter fields to allowed values per Filter field. If no restriction (free text) None is specified
diff --git a/src/service/service/service_handler_api/ServiceHandlerFactory.py b/src/service/service/service_handler_api/ServiceHandlerFactory.py
index 6aa21b49920254383fad5f28aa234b6ec0cad5a3..64ea204a2600a71b08c8c373a15640f5e2134787 100644
--- a/src/service/service/service_handler_api/ServiceHandlerFactory.py
+++ b/src/service/service/service_handler_api/ServiceHandlerFactory.py
@@ -73,6 +73,9 @@ class ServiceHandlerFactory:
             if field_indice is None: continue
             if not isinstance(field_values, Iterable) or isinstance(field_values, str):
                 field_values = [field_values]
+            if len(field_values) == 0:
+                # do not allow empty value lists; they might cause a wrong handler selection
+                raise UnsatisfiedFilterException(filter_fields)
 
             field_enum_values = FILTER_FIELD_ALLOWED_VALUES.get(field_name)
 
diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py
index 4c9059779d6b7031685e1de76b0a7ed651af6c5f..257bc138fe932e7e5abee00981848248039d0b3f 100644
--- a/src/service/service/service_handlers/__init__.py
+++ b/src/service/service/service_handlers/__init__.py
@@ -15,12 +15,14 @@
 from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum
 from ..service_handler_api.FilterFields import FilterFieldEnum
 from .l2nm_emulated.L2NMEmulatedServiceHandler import L2NMEmulatedServiceHandler
+from .l2nm_ietfl2vpn.L2NM_IETFL2VPN_ServiceHandler import L2NM_IETFL2VPN_ServiceHandler
 from .l2nm_openconfig.L2NMOpenConfigServiceHandler import L2NMOpenConfigServiceHandler
 from .l3nm_emulated.L3NMEmulatedServiceHandler import L3NMEmulatedServiceHandler
 from .l3nm_openconfig.L3NMOpenConfigServiceHandler import L3NMOpenConfigServiceHandler
+from .microwave.MicrowaveServiceHandler import MicrowaveServiceHandler
 from .p4.p4_service_handler import P4ServiceHandler
 from .tapi_tapi.TapiServiceHandler import TapiServiceHandler
-from .microwave.MicrowaveServiceHandler import MicrowaveServiceHandler
+from .tapi_xr.TapiXrServiceHandler import TapiXrServiceHandler
 
 SERVICE_HANDLERS = [
     (L2NMEmulatedServiceHandler, [
@@ -50,13 +52,19 @@ SERVICE_HANDLERS = [
     (TapiServiceHandler, [
         {
             FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE,
-            FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API, DeviceDriverEnum.DEVICEDRIVER_XR],
+            FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API],
+        }
+    ]),
+    (TapiXrServiceHandler, [
+        {
+            FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE,
+            FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_XR],
         }
     ]),
     (MicrowaveServiceHandler, [
         {
             FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_L2NM,
-            FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY,
+            FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352],
         }
     ]),
     (P4ServiceHandler, [
@@ -65,4 +73,10 @@ SERVICE_HANDLERS = [
             FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4,
         }
     ]),
+    (L2NM_IETFL2VPN_ServiceHandler, [
+        {
+            FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_L2NM,
+            FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN],
+        }
+    ]),
 ]
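
An illustrative lookup over the SERVICE_HANDLERS table above; the real ServiceHandlerFactory adds validation (including the new empty-list guard), but the matching idea is the same:

    def find_handlers(service_type, device_driver):
        for handler_cls, filter_field_sets in SERVICE_HANDLERS:
            for filter_fields in filter_field_sets:
                if filter_fields.get(FilterFieldEnum.SERVICE_TYPE) != service_type: continue
                drivers = filter_fields.get(FilterFieldEnum.DEVICE_DRIVER, [])
                if not isinstance(drivers, (list, set, tuple)): drivers = [drivers]
                if device_driver not in drivers: continue
                yield handler_cls

    # Expected to yield only the new L2NM_IETFL2VPN_ServiceHandler:
    print(list(find_handlers(
        ServiceTypeEnum.SERVICETYPE_L2NM, DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN)))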
diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
index c2ea6e213ee8d18b4507089fb2762c913e03039a..ac44574ad60242b0acf21ba824ea448d5ec30bf1 100644
--- a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
+++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
@@ -21,15 +21,18 @@ def setup_config_rules(
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
-    json_settings          : Dict = {} if service_settings  is None else service_settings.value
-    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+    if service_settings  is None: return []
+    if endpoint_settings is None: return []
 
-    mtu                 = json_settings.get('mtu',                 1450 )    # 1512
+    #json_settings          : Dict = service_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
+
+    #mtu                 = json_settings.get('mtu',                 1450 )    # 1512
     #address_families    = json_settings.get('address_families',    []   )    # ['IPV4']
     #bgp_as              = json_settings.get('bgp_as',              0    )    # 65000
     #bgp_route_target    = json_settings.get('bgp_route_target',    '0:0')    # 65000:333
 
-    router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
+    #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
     #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
     sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
     vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
@@ -43,17 +46,17 @@ def setup_config_rules(
     connection_point_id   = 'VC-1'
 
     json_config_rules = [
-        json_config_rule_set(
-            '/network_instance[default]',
-            {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}),
+        #json_config_rule_set(
+        #    '/network_instance[default]',
+        #    {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}),
 
-        json_config_rule_set(
-            '/network_instance[default]/protocols[OSPF]',
-            {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}),
+        #json_config_rule_set(
+        #    '/network_instance[default]/protocols[OSPF]',
+        #    {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}),
 
-        json_config_rule_set(
-            '/network_instance[default]/protocols[STATIC]',
-            {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}),
+        #json_config_rule_set(
+        #    '/network_instance[default]/protocols[STATIC]',
+        #    {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}),
 
         json_config_rule_set(
             '/network_instance[{:s}]'.format(network_instance_name),
@@ -66,7 +69,7 @@ def setup_config_rules(
         json_config_rule_set(
             '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
             {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
-            'subinterface': sub_interface_index}),
+             'subinterface': sub_interface_index}),
 
         json_config_rule_set(
             '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
@@ -80,15 +83,18 @@ def teardown_config_rules(
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
-    #json_settings          : Dict = {} if service_settings  is None else service_settings.value
-    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+    if service_settings  is None: return []
+    if endpoint_settings is None: return []
+
+    #json_settings          : Dict = service_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
 
     #mtu                 = json_settings.get('mtu',                 1450 )    # 1512
     #address_families    = json_settings.get('address_families',    []   )    # ['IPV4']
     #bgp_as              = json_settings.get('bgp_as',              0    )    # 65000
     #bgp_route_target    = json_settings.get('bgp_route_target',    '0:0')    # 65000:333
 
-    router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
+    #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
     #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
     sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
     #vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
@@ -111,24 +117,24 @@ def teardown_config_rules(
             {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
             'subinterface': sub_interface_index}),
 
-        json_config_rule_delete(
-            '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index),
-            {'name': if_cirid_name, 'index': sub_interface_index}),
-
         json_config_rule_delete(
             '/network_instance[{:s}]'.format(network_instance_name),
             {'name': network_instance_name}),
 
         json_config_rule_delete(
-            '/network_instance[default]/protocols[STATIC]',
-            {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}),
+            '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index),
+            {'name': if_cirid_name, 'index': sub_interface_index}),
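+        # NOTE: the network instance is removed before its subinterface, so the
+        # subinterface is no longer referenced at the time it is deleted.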
 
-        json_config_rule_delete(
-            '/network_instance[default]/protocols[OSPF]',
-            {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}),
+        #json_config_rule_delete(
+        #    '/network_instance[default]/protocols[STATIC]',
+        #    {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}),
 
-        json_config_rule_delete(
-            '/network_instance[default]',
-            {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}),
+        #json_config_rule_delete(
+        #    '/network_instance[default]/protocols[OSPF]',
+        #    {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}),
+
+        #json_config_rule_delete(
+        #    '/network_instance[default]',
+        #    {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}),
     ]
     return json_config_rules
diff --git a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py
index 9de6c607b1336a4b3fb43867efc16d30048177e0..416c10f72fe2199ce241c4d527d9c58ce93d2b44 100644
--- a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py
+++ b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py
@@ -14,7 +14,7 @@
 
 import json, logging
 from typing import Any, List, Optional, Tuple, Union
-from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
 from common.proto.context_pb2 import ConfigRule, DeviceId, Service
 from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
@@ -26,22 +26,7 @@ from .ConfigRules import setup_config_rules, teardown_config_rules
 
 LOGGER = logging.getLogger(__name__)
 
-HISTOGRAM_BUCKETS = (
-    # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
-    0.0010, 0.0025, 0.0050, 0.0075,
-    0.0100, 0.0250, 0.0500, 0.0750,
-    0.1000, 0.2500, 0.5000, 0.7500,
-    1.0000, 2.5000, 5.0000, 7.5000,
-    10.0000, 25.000, 50.0000, 75.000,
-    100.0, INF
-)
 METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l2nm_emulated'})
-METRICS_POOL.get_or_create('SetEndpoint',      MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteEndpoint',   MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SetConstraint',    MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SetConfig',        MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteConfig',     MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
 
 class L2NMEmulatedServiceHandler(_ServiceHandler):
     def __init__(   # pylint: disable=super-init-not-called
@@ -75,10 +60,12 @@ class L2NMEmulatedServiceHandler(_ServiceHandler):
                     service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
                     settings, endpoint_settings)
 
-                del device_obj.device_config.config_rules[:]
-                for json_config_rule in json_config_rules:
-                    device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
-                self.__task_executor.configure_device(device_obj)
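+                # Only (re)configure the device when rules were generated; an empty
+                # list means the required settings are not available yet.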
+                if len(json_config_rules) > 0:
+                    del device_obj.device_config.config_rules[:]
+                    for json_config_rule in json_config_rules:
+                        device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                    self.__task_executor.configure_device(device_obj)
+
                 results.append(True)
             except Exception as e: # pylint: disable=broad-except
                 LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint)))
@@ -110,10 +97,12 @@ class L2NMEmulatedServiceHandler(_ServiceHandler):
                     service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
                     settings, endpoint_settings)
 
-                del device_obj.device_config.config_rules[:]
-                for json_config_rule in json_config_rules:
-                    device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
-                self.__task_executor.configure_device(device_obj)
+                if len(json_config_rules) > 0:
+                    del device_obj.device_config.config_rules[:]
+                    for json_config_rule in json_config_rules:
+                        device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                    self.__task_executor.configure_device(device_obj)
+
                 results.append(True)
             except Exception as e: # pylint: disable=broad-except
                 LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint)))
diff --git a/src/service/service/service_handlers/l2nm_ietfl2vpn/L2NM_IETFL2VPN_ServiceHandler.py b/src/service/service/service_handlers/l2nm_ietfl2vpn/L2NM_IETFL2VPN_ServiceHandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e832516b4e8c12028dcf82681406940b248e0f4
--- /dev/null
+++ b/src/service/service/service_handlers/l2nm_ietfl2vpn/L2NM_IETFL2VPN_ServiceHandler.py
@@ -0,0 +1,173 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging
+from typing import Any, Dict, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.proto.context_pb2 import ConfigRule, DeviceId, Service
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from common.tools.object_factory.Device import json_device_id
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
+from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+from service.service.service_handler_api.SettingsHandler import SettingsHandler
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l2nm_ietf_l2vpn'})
+
+class L2NM_IETFL2VPN_ServiceHandler(_ServiceHandler):
+    def __init__(   # pylint: disable=super-init-not-called
+        self, service : Service, task_executor : TaskExecutor, **settings
+    ) -> None:
+        self.__service = service
+        self.__task_executor = task_executor
+        self.__settings_handler = SettingsHandler(service.service_config, **settings)
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) < 2: return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
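+        # Service-level settings come from the '/settings' config rule; the defaults below apply when fields are absent.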
+        settings = self.__settings_handler.get('/settings')
+        json_settings : Dict = {} if settings is None else settings.value
+        encap_type = json_settings.get('encapsulation_type', '')
+        vlan_id    = json_settings.get('vlan_id', 100)
+
+        results = []
+        try:
+            src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0])
+            src_device = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid)))
+            src_endpoint = get_endpoint_matching(src_device, src_endpoint_uuid)
+            src_controller = self.__task_executor.get_device_controller(src_device)
+
+            dst_device_uuid, dst_endpoint_uuid = get_device_endpoint_uuids(endpoints[-1])
+            dst_device = self.__task_executor.get_device(DeviceId(**json_device_id(dst_device_uuid)))
+            dst_endpoint = get_endpoint_matching(dst_device, dst_endpoint_uuid)
+            dst_controller = self.__task_executor.get_device_controller(dst_device)
+
+            if src_controller.device_id.device_uuid.uuid != dst_controller.device_id.device_uuid.uuid:
+                raise Exception('Different Src-Dst devices not supported for now')
+            controller = src_controller
+
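+            # Encode the whole L2VPN request as a single config rule on the controller device;
+            # the IETF L2VPN device driver consumes this rule to provision the service.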
+            json_config_rule = json_config_rule_set('/services/service[{:s}]'.format(service_uuid), {
+                'uuid'              : service_uuid,
+                'src_device_name'   : src_device.name,
+                'src_endpoint_name' : src_endpoint.name,
+                'dst_device_name'   : dst_device.name,
+                'dst_endpoint_name' : dst_endpoint.name,
+                'encapsulation_type': encap_type,
+                'vlan_id'           : vlan_id,
+            })
+            del controller.device_config.config_rules[:]
+            controller.device_config.config_rules.append(ConfigRule(**json_config_rule))
+            self.__task_executor.configure_device(controller)
+            results.append(True)
+        except Exception as e: # pylint: disable=broad-except
+            LOGGER.exception('Unable to SetEndpoint for Service({:s})'.format(str(service_uuid)))
+            results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) < 2: return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
+
+        results = []
+        try:
+            src_device_uuid, _ = get_device_endpoint_uuids(endpoints[0])
+            src_device = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid)))
+            src_controller = self.__task_executor.get_device_controller(src_device)
+
+            dst_device_uuid, _ = get_device_endpoint_uuids(endpoints[1])
+            dst_device = self.__task_executor.get_device(DeviceId(**json_device_id(dst_device_uuid)))
+            dst_controller = self.__task_executor.get_device_controller(dst_device)
+
+            if src_controller.device_id.device_uuid.uuid != dst_controller.device_id.device_uuid.uuid:
+                raise Exception('Different Src-Dst devices not supported for now')
+            controller = src_controller
+
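+            # A delete rule on '/services/service[<uuid>]' instructs the driver to tear down the service.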
+            json_config_rule = json_config_rule_delete('/services/service[{:s}]'.format(service_uuid), {
+                'uuid': service_uuid
+            })
+            del controller.device_config.config_rules[:]
+            controller.device_config.config_rules.append(ConfigRule(**json_config_rule))
+            self.__task_executor.configure_device(controller)
+            results.append(True)
+        except Exception as e: # pylint: disable=broad-except
+            LOGGER.exception('Unable to DeleteEndpoint for Service({:s})'.format(str(service_uuid)))
+            results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
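+                # Resource values arrive as JSON-encoded strings; decode them before storing in the settings tree.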
+                resource_value = json.loads(resource[1])
+                self.__settings_handler.set(resource[0], resource_value)
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                self.__settings_handler.delete(resource[0])
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
diff --git a/src/service/service/service_handlers/l2nm_ietfl2vpn/__init__.py b/src/service/service/service_handlers/l2nm_ietfl2vpn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/service/service/service_handlers/l2nm_ietfl2vpn/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py
index 07e78d73631342d101d77697098e83961c7dcf26..5afedb33dea6783af9cdb88b86bc186a279de9cc 100644
--- a/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py
+++ b/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py
@@ -20,16 +20,13 @@ def setup_config_rules(
     service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
-    
+
     if service_settings  is None: return []
     if endpoint_settings is None: return []
 
-    json_settings          : Dict = service_settings.value
+    #json_settings          : Dict = service_settings.value
     json_endpoint_settings : Dict = endpoint_settings.value
 
-    json_settings          : Dict = {} if service_settings  is None else service_settings.value
-    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
-
     #mtu                 = json_settings.get('mtu',                 1450 )    # 1512
     #address_families    = json_settings.get('address_families',    []   )    # ['IPV4']
     #bgp_as              = json_settings.get('bgp_as',              0    )    # 65000
@@ -41,43 +38,32 @@ def setup_config_rules(
     vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
     #address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
     #address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30
-    remote_router       = json_endpoint_settings.get('remote_router',       '5.5.5.5')  # '5.5.5.5'
-    circuit_id          = json_endpoint_settings.get('circuit_id',          '111'    )  # '111'
-    
+    remote_router       = json_endpoint_settings.get('remote_router',       '0.0.0.0')  # '5.5.5.5'
+    circuit_id          = json_endpoint_settings.get('circuit_id',          '000'    )  # '111'
 
     if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
     network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
     connection_point_id   = 'VC-1'
 
     json_config_rules = [
-        
+
         json_config_rule_set(
             '/network_instance[{:s}]'.format(network_instance_name),
-            {'name': network_instance_name, 
-             'type': 'L2VSI'}),
+            {'name': network_instance_name, 'type': 'L2VSI'}),
 
         json_config_rule_set(
-            '/interface[{:s}]/subinterface[0]'.format(if_cirid_name),
-            {'name': if_cirid_name, 
-             'type': 'l2vlan', 
-             'index': sub_interface_index, 
-             'vlan_id': vlan_id}),
+            '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index),
+            {'name': if_cirid_name, 'type': 'l2vlan', 'index': sub_interface_index, 'vlan_id': vlan_id}),
 
         json_config_rule_set(
             '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
-            {'name': network_instance_name, 
-             'id': if_cirid_name, 
-             'interface': if_cirid_name,
-             'subinterface': 0
-            }),
+            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
+             'subinterface': sub_interface_index}),
 
         json_config_rule_set(
             '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
-            {'name': network_instance_name, 
-             'connection_point': connection_point_id, 
-             'VC_ID': circuit_id,
-             'remote_system': remote_router
-            }),
+            {'name': network_instance_name, 'connection_point': connection_point_id, 'VC_ID': circuit_id,
+             'remote_system': remote_router}),
     ]
     return json_config_rules
 
@@ -86,8 +72,11 @@ def teardown_config_rules(
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
-    #json_settings          : Dict = {} if service_settings  is None else service_settings.value
-    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+    if service_settings  is None: return []
+    if endpoint_settings is None: return []
+
+    #json_settings          : Dict = service_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
 
     #mtu                 = json_settings.get('mtu',                 1450 )    # 1512
     #address_families    = json_settings.get('address_families',    []   )    # ['IPV4']
@@ -96,7 +85,7 @@ def teardown_config_rules(
 
     #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
     #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
-    #sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
+    sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
     #vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
     #address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
     #address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30
@@ -105,17 +94,26 @@ def teardown_config_rules(
 
     if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
     network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
-    #connection_point_id   = 'VC-1'
+    connection_point_id   = 'VC-1'
 
     json_config_rules = [
+
+        #json_config_rule_delete(
+        #    '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
+        #    {'name': network_instance_name, 'connection_point': connection_point_id, 'VC_ID': circuit_id}),
+
+        #json_config_rule_delete(
+        #    '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
+        #    {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
+        #     'subinterface': sub_interface_index}),
+
         json_config_rule_delete(
             '/network_instance[{:s}]'.format(network_instance_name),
             {'name': network_instance_name}),
-        
+
         json_config_rule_delete(
-            '/interface[{:s}]/subinterface[0]'.format(if_cirid_name),{
-            'name': if_cirid_name,
-        }),
-        
+            '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index),
+            {'name': if_cirid_name, 'index': sub_interface_index}),
+
     ]
     return json_config_rules
diff --git a/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py
index d511c8947ecb43052fd154ab3ce3293a468b4263..aae9e968b44af52170fdf6f6ecfab76fe90e2b52 100644
--- a/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py
+++ b/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py
@@ -14,7 +14,7 @@
 
 import json, logging
 from typing import Any, List, Optional, Tuple, Union
-from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
 from common.proto.context_pb2 import ConfigRule, DeviceId, Service
 from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
@@ -26,22 +26,7 @@ from .ConfigRules import setup_config_rules, teardown_config_rules
 
 LOGGER = logging.getLogger(__name__)
 
-HISTOGRAM_BUCKETS = (
-    # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
-    0.0010, 0.0025, 0.0050, 0.0075,
-    0.0100, 0.0250, 0.0500, 0.0750,
-    0.1000, 0.2500, 0.5000, 0.7500,
-    1.0000, 2.5000, 5.0000, 7.5000,
-    10.0000, 25.000, 50.0000, 75.000,
-    100.0, INF
-)
 METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l2nm_openconfig'})
-METRICS_POOL.get_or_create('SetEndpoint',      MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteEndpoint',   MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SetConstraint',    MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SetConfig',        MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteConfig',     MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
 
 class L2NMOpenConfigServiceHandler(_ServiceHandler):
     def __init__(   # pylint: disable=super-init-not-called
diff --git a/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py
index 903ad8cd5ae442a03d54fb49083f3837a3c8187c..f4a46112e778bd01aa76322384d8adee942aaa5b 100644
--- a/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py
+++ b/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py
@@ -21,8 +21,11 @@ def setup_config_rules(
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
-    json_settings          : Dict = {} if service_settings  is None else service_settings.value
-    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+    if service_settings  is None: return []
+    if endpoint_settings is None: return []
+
+    json_settings          : Dict = service_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
 
     service_short_uuid        = service_uuid.split('-')[-1]
     network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
@@ -142,8 +145,11 @@ def teardown_config_rules(
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
-    json_settings          : Dict = {} if service_settings  is None else service_settings.value
-    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+    if service_settings  is None: return []
+    if endpoint_settings is None: return []
+
+    json_settings          : Dict = service_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
 
     #mtu                 = json_settings.get('mtu',                 1450 )    # 1512
     #address_families    = json_settings.get('address_families',    []   )    # ['IPV4']
diff --git a/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py
index 47de9c94fbb8a5ddac848336c2ed7936d0126b45..de02a43caffd91ae047bd73d319e969af6265c5c 100644
--- a/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py
+++ b/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py
@@ -14,7 +14,7 @@
 
 import json, logging
 from typing import Any, List, Optional, Tuple, Union
-from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
 from common.proto.context_pb2 import ConfigRule, DeviceId, Service
 from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
@@ -26,22 +26,7 @@ from .ConfigRules import setup_config_rules, teardown_config_rules
 
 LOGGER = logging.getLogger(__name__)
 
-HISTOGRAM_BUCKETS = (
-    # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
-    0.0010, 0.0025, 0.0050, 0.0075,
-    0.0100, 0.0250, 0.0500, 0.0750,
-    0.1000, 0.2500, 0.5000, 0.7500,
-    1.0000, 2.5000, 5.0000, 7.5000,
-    10.0000, 25.000, 50.0000, 75.000,
-    100.0, INF
-)
 METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l3nm_emulated'})
-METRICS_POOL.get_or_create('SetEndpoint',      MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteEndpoint',   MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SetConstraint',    MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SetConfig',        MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteConfig',     MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
 
 class L3NMEmulatedServiceHandler(_ServiceHandler):
     def __init__(   # pylint: disable=super-init-not-called
@@ -75,10 +60,12 @@ class L3NMEmulatedServiceHandler(_ServiceHandler):
                     service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
                     settings, endpoint_settings)
 
-                del device_obj.device_config.config_rules[:]
-                for json_config_rule in json_config_rules:
-                    device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
-                self.__task_executor.configure_device(device_obj)
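+                # Push configuration only when rules exist; missing settings produce an empty list.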
+                if len(json_config_rules) > 0:
+                    del device_obj.device_config.config_rules[:]
+                    for json_config_rule in json_config_rules:
+                        device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                    self.__task_executor.configure_device(device_obj)
+
                 results.append(True)
             except Exception as e: # pylint: disable=broad-except
                 LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint)))
@@ -110,10 +97,12 @@ class L3NMEmulatedServiceHandler(_ServiceHandler):
                     service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
                     settings, endpoint_settings)
 
-                del device_obj.device_config.config_rules[:]
-                for json_config_rule in json_config_rules:
-                    device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
-                self.__task_executor.configure_device(device_obj)
+                if len(json_config_rules) > 0:
+                    del device_obj.device_config.config_rules[:]
+                    for json_config_rule in json_config_rules:
+                        device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                    self.__task_executor.configure_device(device_obj)
+
                 results.append(True)
             except Exception as e: # pylint: disable=broad-except
                 LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint)))
diff --git a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
index ef93dcdda8145cab15ff21c24b6318e9eb00e098..5d260bf86b82c66be8eb2f0caa683a72d8bd0ba5 100644
--- a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
+++ b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
@@ -187,8 +187,8 @@ def teardown_config_rules(
     if service_settings  is None: return []
     if endpoint_settings is None: return []
 
-    json_settings          : Dict = {} if service_settings  is None else service_settings.value
-    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+    json_settings          : Dict = service_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
 
     service_short_uuid        = service_uuid.split('-')[-1]
     network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
diff --git a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
index b2639ddad58e4c453f1b1e2dc87fce8861ad79a2..b14a005e12947cc99b4d46ad0c58c9aae5778d05 100644
--- a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
+++ b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
@@ -14,7 +14,7 @@
 
 import json, logging
 from typing import Any, List, Optional, Tuple, Union
-from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
 from common.proto.context_pb2 import ConfigRule, DeviceId, Service
 from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
@@ -26,22 +26,7 @@ from .ConfigRules import setup_config_rules, teardown_config_rules
 
 LOGGER = logging.getLogger(__name__)
 
-HISTOGRAM_BUCKETS = (
-    # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
-    0.0010, 0.0025, 0.0050, 0.0075,
-    0.0100, 0.0250, 0.0500, 0.0750,
-    0.1000, 0.2500, 0.5000, 0.7500,
-    1.0000, 2.5000, 5.0000, 7.5000,
-    10.0000, 25.000, 50.0000, 75.000,
-    100.0, INF
-)
 METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l3nm_openconfig'})
-METRICS_POOL.get_or_create('SetEndpoint',      MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteEndpoint',   MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SetConstraint',    MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('SetConfig',        MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
-METRICS_POOL.get_or_create('DeleteConfig',     MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
 
 class L3NMOpenConfigServiceHandler(_ServiceHandler):
     def __init__(   # pylint: disable=super-init-not-called
diff --git a/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py b/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py
index ee64d2fa4ff0110aea9ee4beee97fa83915ab57d..40c87eeee2c8dd1ddd5a39162f8ff7f117344e3b 100644
--- a/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py
+++ b/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py
@@ -61,7 +61,7 @@ class MicrowaveServiceHandler(_ServiceHandler):
             device_uuid_dst, endpoint_uuid_dst = get_device_endpoint_uuids(endpoints[1])
 
             if device_uuid_src != device_uuid_dst:
-                raise Exception('Diferent Src-Dst devices not supported by now')
+                raise Exception('Different Src-Dst devices not supported for now')
             device_uuid = device_uuid_src
 
             device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
@@ -106,7 +106,7 @@ class MicrowaveServiceHandler(_ServiceHandler):
             device_uuid_dst, _ = get_device_endpoint_uuids(endpoints[1])
 
             if device_uuid_src != device_uuid_dst:
-                raise Exception('Diferent Src-Dst devices not supported by now')
+                raise Exception('Different Src-Dst devices not supported for now')
             device_uuid = device_uuid_src
 
             device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
diff --git a/src/service/service/service_handlers/p4/p4_service_handler.py b/src/service/service/service_handlers/p4/p4_service_handler.py
index 6f2cfb5a9bc4dac991eecd14ba7b6eb1218bdaa2..41cfcc5952601a16a13cd691f2e424017936aaa3 100644
--- a/src/service/service/service_handlers/p4/p4_service_handler.py
+++ b/src/service/service/service_handlers/p4/p4_service_handler.py
@@ -16,18 +16,20 @@
 P4 service handler for the TeraFlowSDN controller.
 """
 
-import anytree, json, logging
-from typing import Any, Dict, List, Optional, Tuple, Union
-from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service
-from common.tools.object_factory.ConfigRule import json_config_rule, json_config_rule_delete, json_config_rule_set
+import logging
+from typing import Any, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.proto.context_pb2 import ConfigRule, DeviceId, Service
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
 from common.tools.object_factory.Device import json_device_id
-from common.type_checkers.Checkers import chk_type, chk_length
+from common.type_checkers.Checkers import chk_type
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
-from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
 
 LOGGER = logging.getLogger(__name__)
 
+METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4'})
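+# Handler methods below are decorated with @metered_subclass_method so their execution is metered through this pool.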
+
 def create_rule_set(endpoint_a, endpoint_b):
     return json_config_rule_set(
         'table',
@@ -99,6 +101,7 @@ class P4ServiceHandler(_ServiceHandler):
         self.__service = service
         self.__task_executor = task_executor # pylint: disable=unused-private-member
 
+    @metered_subclass_method(METRICS_POOL)
     def SetEndpoint(
         self, endpoints : List[Tuple[str, str, Optional[str]]],
         connection_uuid : Optional[str] = None
@@ -169,6 +172,7 @@ class P4ServiceHandler(_ServiceHandler):
 
         return results
 
+    @metered_subclass_method(METRICS_POOL)
     def DeleteEndpoint(
         self, endpoints : List[Tuple[str, str, Optional[str]]],
         connection_uuid : Optional[str] = None
@@ -239,6 +243,7 @@ class P4ServiceHandler(_ServiceHandler):
 
         return results
 
+    @metered_subclass_method(METRICS_POOL)
     def SetConstraint(self, constraints: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Create/Update service constraints.
@@ -261,6 +266,7 @@ class P4ServiceHandler(_ServiceHandler):
         LOGGER.warning(msg.format(str(constraints)))
         return [True for _ in range(len(constraints))]
 
+    @metered_subclass_method(METRICS_POOL)
     def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Delete service constraints.
@@ -285,6 +291,7 @@ class P4ServiceHandler(_ServiceHandler):
         LOGGER.warning(msg.format(str(constraints)))
         return [True for _ in range(len(constraints))]
 
+    @metered_subclass_method(METRICS_POOL)
     def SetConfig(self, resources: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Create/Update configuration for a list of service resources.
@@ -308,6 +315,7 @@ class P4ServiceHandler(_ServiceHandler):
         LOGGER.warning(msg.format(str(resources)))
         return [True for _ in range(len(resources))]
 
+    @metered_subclass_method(METRICS_POOL)
     def DeleteConfig(self, resources: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Delete configuration for a list of service resources.
diff --git a/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py b/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py
index 8abd12b2a24c49a6c5e50cebe7a2d65dc7ce4eb1..af7d4bc949fc98f057ade66b58d8b9b38e0707ed 100644
--- a/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py
+++ b/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py
@@ -19,7 +19,7 @@ from common.proto.context_pb2 import ConfigRule, DeviceId, Service
 from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
 from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
-from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.SettingsHandler import SettingsHandler
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
@@ -42,7 +42,7 @@ class TapiServiceHandler(_ServiceHandler):
     ) -> List[Union[bool, Exception]]:
 
         chk_type('endpoints', endpoints, list)
-        if len(endpoints) != 2: return []
+        if len(endpoints) < 2: return []
 
         service_uuid = self.__service.service_id.service_uuid.uuid
         settings = self.__settings_handler.get('/settings')
@@ -55,30 +55,33 @@ class TapiServiceHandler(_ServiceHandler):
 
         results = []
         try:
-            device_uuid_src, endpoint_uuid_src = get_device_endpoint_uuids(endpoints[0])
-            device_uuid_dst, endpoint_uuid_dst = get_device_endpoint_uuids(endpoints[1])
+            src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0])
+            src_device = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid)))
+            src_controller = self.__task_executor.get_device_controller(src_device)
+            if src_controller is None: src_controller = src_device
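+            # Stand-alone devices have no controller; fall back to configuring the device itself.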
 
-            if device_uuid_src != device_uuid_dst:
-                raise Exception('Diferent Src-Dst devices not supported by now')
-            device_uuid = device_uuid_src
+            dst_device_uuid, dst_endpoint_uuid = get_device_endpoint_uuids(endpoints[-1])
+            dst_device = self.__task_executor.get_device(DeviceId(**json_device_id(dst_device_uuid)))
+            dst_controller = self.__task_executor.get_device_controller(dst_device)
+            if dst_controller is None: dst_controller = dst_device
 
-            device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
-            endpoint_name_src = get_endpoint_matching(device_obj, endpoint_uuid_src).name
-            endpoint_name_dst = get_endpoint_matching(device_obj, endpoint_uuid_dst).name
+            if src_controller.device_id.device_uuid.uuid != dst_controller.device_id.device_uuid.uuid:
+                raise Exception('Different Src-Dst devices not supported for now')
+            controller = src_controller
 
             json_config_rule = json_config_rule_set('/services/service[{:s}]'.format(service_uuid), {
                 'uuid'                    : service_uuid,
-                'input_sip'               : endpoint_name_src,
-                'output_sip'              : endpoint_name_dst,
+                'input_sip_uuid'          : src_endpoint_uuid,
+                'output_sip_uuid'         : dst_endpoint_uuid,
                 'capacity_unit'           : capacity_unit,
                 'capacity_value'          : capacity_value,
                 'layer_protocol_name'     : layer_proto_name,
                 'layer_protocol_qualifier': layer_proto_qual,
                 'direction'               : direction,
             })
-            del device_obj.device_config.config_rules[:]
-            device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
-            self.__task_executor.configure_device(device_obj)
+            del controller.device_config.config_rules[:]
+            controller.device_config.config_rules.append(ConfigRule(**json_config_rule))
+            self.__task_executor.configure_device(controller)
             results.append(True)
         except Exception as e: # pylint: disable=broad-except
             LOGGER.exception('Unable to SetEndpoint for Service({:s})'.format(str(service_uuid)))
@@ -92,27 +95,32 @@ class TapiServiceHandler(_ServiceHandler):
     ) -> List[Union[bool, Exception]]:
 
         chk_type('endpoints', endpoints, list)
-        if len(endpoints) != 2: return []
+        if len(endpoints) < 2: return []
 
         service_uuid = self.__service.service_id.service_uuid.uuid
 
         results = []
         try:
-            device_uuid_src, _ = get_device_endpoint_uuids(endpoints[0])
-            device_uuid_dst, _ = get_device_endpoint_uuids(endpoints[1])
+            src_device_uuid, _ = get_device_endpoint_uuids(endpoints[0])
+            src_device = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid)))
+            src_controller = self.__task_executor.get_device_controller(src_device)
+            if src_controller is None: src_controller = src_device
 
-            if device_uuid_src != device_uuid_dst:
-                raise Exception('Diferent Src-Dst devices not supported by now')
-            device_uuid = device_uuid_src
+            dst_device_uuid, _ = get_device_endpoint_uuids(endpoints[1])
+            dst_device = self.__task_executor.get_device(DeviceId(**json_device_id(dst_device_uuid)))
+            dst_controller = self.__task_executor.get_device_controller(dst_device)
+            if dst_controller is None: dst_controller = dst_device
 
-            device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+            if src_controller.device_id.device_uuid.uuid != dst_controller.device_id.device_uuid.uuid:
+                raise Exception('Different Src-Dst devices not supported for now')
+            controller = src_controller
 
             json_config_rule = json_config_rule_delete('/services/service[{:s}]'.format(service_uuid), {
                 'uuid': service_uuid
             })
-            del device_obj.device_config.config_rules[:]
-            device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
-            self.__task_executor.configure_device(device_obj)
+            del controller.device_config.config_rules[:]
+            controller.device_config.config_rules.append(ConfigRule(**json_config_rule))
+            self.__task_executor.configure_device(controller)
             results.append(True)
         except Exception as e: # pylint: disable=broad-except
             LOGGER.exception('Unable to DeleteEndpoint for Service({:s})'.format(str(service_uuid)))
diff --git a/src/service/service/service_handlers/tapi_xr/TapiXrServiceHandler.py b/src/service/service/service_handlers/tapi_xr/TapiXrServiceHandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..a79ebb82d6cb57967e96b70e0144fd410f8666b0
--- /dev/null
+++ b/src/service/service/service_handlers/tapi_xr/TapiXrServiceHandler.py
@@ -0,0 +1,190 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging
+from typing import Any, Dict, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.proto.context_pb2 import ConfigRule, DeviceId, Service
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from common.tools.object_factory.Device import json_device_id
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
+from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+from service.service.service_handler_api.SettingsHandler import SettingsHandler
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'tapi_xr'})
+
+class TapiXrServiceHandler(_ServiceHandler):
+    def __init__(   # pylint: disable=super-init-not-called
+        self, service : Service, task_executor : TaskExecutor, **settings
+    ) -> None:
+        self.__service = service
+        self.__task_executor = task_executor
+        self.__settings_handler = SettingsHandler(service.service_config, **settings)
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+
+        chk_type('endpoints', endpoints, list)
+
+        # When using the regular mode, where the XR constellation is a single device, we get two
+        # endpoints. Convert that representation to the form used below, which expects the
+        # constellation to be represented as multiple devices.
+        if len(endpoints) == 2:
+            endpoints = [None, endpoints[0], endpoints[1], None]
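+            # e.g. [(dev, ep_a), (dev, ep_b)] becomes [None, (dev, ep_a), (dev, ep_b), None], so that
+            # indices 1 and 2 always hold the two constellation edge endpoints.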
+
+        if len(endpoints) != 4: return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
+        settings = self.__settings_handler.get('/settings')
+        json_settings : Dict = {} if settings is None else settings.value
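+        # Capacity of the XR connection; the defaults below apply when '/settings' omits these fields.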
+        capacity_value   = json_settings.get('capacity_value', 50.0)
+        capacity_unit    = json_settings.get('capacity_unit',  'GHz')
+
+        results = []
+        try:
+            src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[1])
+            src_device = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid)))
+            src_endpoint = get_endpoint_matching(src_device, src_endpoint_uuid)
+            src_controller = self.__task_executor.get_device_controller(src_device)
+            if src_controller is None: src_controller = src_device
+
+            dst_device_uuid, dst_endpoint_uuid = get_device_endpoint_uuids(endpoints[2])
+            dst_device = self.__task_executor.get_device(DeviceId(**json_device_id(dst_device_uuid)))
+            dst_endpoint = get_endpoint_matching(dst_device, dst_endpoint_uuid)
+            dst_controller = self.__task_executor.get_device_controller(dst_device)
+            if dst_controller is None: dst_controller = dst_device
+
+            if src_controller.device_id.device_uuid.uuid != dst_controller.device_id.device_uuid.uuid:
+                raise Exception('Different Src-Dst devices not supported for now')
+            controller = src_controller
+
+            # If the special mode that splits the XR constellation into multiple modelled devices is
+            # used, prepend the device name to the interface name. Otherwise use it as is (it will
+            # already contain the pipe character and the edge device name). This code should be
+            # refactored, as the interface name structure is an internal matter of the XR driver and
+            # subject to change.
+            constellation_unique_src = src_endpoint.name if '|' in src_endpoint.name else '|'.join([src_device.name, src_endpoint.name])
+            constellation_unique_dst = dst_endpoint.name if '|' in dst_endpoint.name else '|'.join([dst_device.name, dst_endpoint.name])
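+            # e.g. a hypothetical endpoint 'XR1-CLIENT1' on device 'XR-LEAF1' yields 'XR-LEAF1|XR1-CLIENT1'.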
+
+            json_config_rule = json_config_rule_set('/services/service[{:s}]'.format(service_uuid), {
+                'uuid'           : service_uuid,
+                'input_sip_name' : constellation_unique_src,
+                'output_sip_name': constellation_unique_dst,
+                'capacity_unit'  : capacity_unit,
+                'capacity_value' : capacity_value,
+            })
+
+            del controller.device_config.config_rules[:]
+            controller.device_config.config_rules.append(ConfigRule(**json_config_rule))
+            self.__task_executor.configure_device(controller)
+            results.append(True)
+        except Exception as e: # pylint: disable=broad-except
+            LOGGER.exception('Unable to SetEndpoint for Service({:s})'.format(str(service_uuid)))
+            results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) < 2: return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
+
+        results = []
+        try:
+            src_device_uuid, _ = get_device_endpoint_uuids(endpoints[0])
+            src_device = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid)))
+            src_controller = self.__task_executor.get_device_controller(src_device)
+            if src_controller is None: src_controller = src_device
+
+            dst_device_uuid, _ = get_device_endpoint_uuids(endpoints[1])
+            dst_device = self.__task_executor.get_device(DeviceId(**json_device_id(dst_device_uuid)))
+            dst_controller = self.__task_executor.get_device_controller(dst_device)
+            if dst_controller is None: dst_controller = dst_device
+
+            if src_controller.device_id.device_uuid.uuid != dst_controller.device_id.device_uuid.uuid:
+                raise Exception('Different Src-Dst devices not yet supported')
+            controller = src_controller
+
+            json_config_rule = json_config_rule_delete('/services/service[{:s}]'.format(service_uuid), {
+                'uuid': service_uuid
+            })
+            del controller.device_config.config_rules[:]
+            controller.device_config.config_rules.append(ConfigRule(**json_config_rule))
+            self.__task_executor.configure_device(controller)
+            results.append(True)
+        except Exception as e: # pylint: disable=broad-except
+            LOGGER.exception('Unable to DeleteEndpoint for Service({:s})'.format(str(service_uuid)))
+            results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                resource_value = json.loads(resource[1])
+                self.__settings_handler.set(resource[0], resource_value)
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                self.__settings_handler.delete(resource[0])
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
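Note on the SetConfig/DeleteConfig pair above: each resource arrives as a (resource_key, resource_value) tuple whose value is a JSON string, and the handler stores the parsed object under the key. A minimal sketch of that contract, using a hypothetical dict-backed stand-in for the real SettingsHandler:

    import json

    # Hypothetical stand-in for the service_handler_api SettingsHandler,
    # used only to illustrate the (resource_key, resource_value) round-trip.
    class DictSettingsHandler:
        def __init__(self): self._settings = {}
        def set(self, key, value): self._settings[key] = value
        def get(self, key): return self._settings.get(key)
        def delete(self, key): self._settings.pop(key, None)

    handler = DictSettingsHandler()
    resources = [('/settings', json.dumps({'capacity_value': 50.0, 'capacity_unit': 'GHz'}))]
    for resource_key, resource_value in resources:
        handler.set(resource_key, json.loads(resource_value))   # as in SetConfig
    assert handler.get('/settings')['capacity_unit'] == 'GHz'
    handler.delete('/settings')                                 # as in DeleteConfig
    assert handler.get('/settings') is None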
diff --git a/src/service/service/service_handlers/tapi_xr/__init__.py b/src/service/service/service_handlers/tapi_xr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/service/service/service_handlers/tapi_xr/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py
index 932c56e2b1934e12e7849a60c22d3ca1be7f8093..96751e83770e1b98df4770cf74bb453f6a0519ef 100644
--- a/src/service/service/task_scheduler/TaskExecutor.py
+++ b/src/service/service/task_scheduler/TaskExecutor.py
@@ -12,14 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import json
 from enum import Enum
 from typing import TYPE_CHECKING, Any, Dict, Optional, Union
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId
+from common.tools.context_queries.Connection import get_connection_by_id
+from common.tools.context_queries.Device import get_device
+from common.tools.context_queries.Service import get_service_by_id
+from common.tools.object_factory.Device import json_device_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory, get_service_handler_class
-from service.service.tools.ContextGetters import get_connection, get_device, get_service
 from service.service.tools.ObjectKeys import get_connection_key, get_device_key, get_service_key
 
 if TYPE_CHECKING:
@@ -70,7 +74,7 @@ class TaskExecutor:
         connection_key = get_connection_key(connection_id)
         connection = self._load_grpc_object(CacheableObjectType.CONNECTION, connection_key)
         if connection is None:
-            connection = get_connection(self._context_client, connection_id)
+            connection = get_connection_by_id(self._context_client, connection_id)
             if connection is None: raise NotFoundException('Connection', connection_key)
             connection : Connection = self._store_editable_grpc_object(
                 CacheableObjectType.CONNECTION, connection_key, Connection, connection)
@@ -92,7 +96,7 @@ class TaskExecutor:
         device_key = get_device_key(device_id)
         device = self._load_grpc_object(CacheableObjectType.DEVICE, device_key)
         if device is None:
-            device = get_device(self._context_client, device_id)
+            device = get_device(self._context_client, device_id.device_uuid.uuid)
             if device is None: raise NotFoundException('Device', device_key)
             device : Device = self._store_editable_grpc_object(
                 CacheableObjectType.DEVICE, device_key, Device, device)
@@ -103,13 +107,37 @@
         self._device_client.ConfigureDevice(device)
         self._store_grpc_object(CacheableObjectType.DEVICE, device_key, device)
 
-    def get_devices_from_connection(self, connection : Connection) -> Dict[str, Device]:
+    def get_device_controller(self, device : Device) -> Optional[Device]:
+        json_controller = None
+        for config_rule in device.device_config.config_rules:
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            if config_rule.custom.resource_key != '_controller': continue
+            json_controller = json.loads(config_rule.custom.resource_value)
+            break
+
+        if json_controller is None: return None
+
+        controller_uuid = json_controller['uuid']
+        controller = self.get_device(DeviceId(**json_device_id(controller_uuid)))
+        if controller is None: raise Exception('Device({:s}) not found'.format(str(controller_uuid)))
+        return controller
+
+    def get_devices_from_connection(
+        self, connection : Connection, exclude_managed_by_controller : bool = False
+    ) -> Dict[str, Device]:
         devices = dict()
         for endpoint_id in connection.path_hops_endpoint_ids:
             device = self.get_device(endpoint_id.device_id)
             device_uuid = endpoint_id.device_id.device_uuid.uuid
             if device is None: raise Exception('Device({:s}) not found'.format(str(device_uuid)))
-            devices[device_uuid] = device
+
+            controller = self.get_device_controller(device)
+            if controller is None:
+                devices[device_uuid] = device
+            else:
+                if not exclude_managed_by_controller:
+                    devices[device_uuid] = device
+                devices[controller.device_id.device_uuid.uuid] = controller
         return devices
 
     # ----- Service-related methods ------------------------------------------------------------------------------------
@@ -118,7 +147,7 @@ class TaskExecutor:
         service_key = get_service_key(service_id)
         service = self._load_grpc_object(CacheableObjectType.SERVICE, service_key)
         if service is None:
-            service = get_service(self._context_client, service_id)
+            service = get_service_by_id(self._context_client, service_id)
             if service is None: raise NotFoundException('Service', service_key)
             service : Service = self._store_editable_grpc_object(
                 CacheableObjectType.SERVICE, service_key, Service, service)
@@ -139,6 +168,6 @@ class TaskExecutor:
     def get_service_handler(
         self, connection : Connection, service : Service, **service_handler_settings
     ) -> '_ServiceHandler':
-        connection_devices = self.get_devices_from_connection(connection)
+        connection_devices = self.get_devices_from_connection(connection, exclude_managed_by_controller=True)
         service_handler_class = get_service_handler_class(self._service_handler_factory, service, connection_devices)
         return service_handler_class(service, self, **service_handler_settings)
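The controller lookup added above relies on a convention: a device managed by a controller carries a custom config rule whose resource_key is '_controller' and whose resource_value is a JSON object holding the controller's uuid. A minimal sketch of that convention over plain dicts (the real code walks the gRPC Device message):

    import json

    def find_controller_uuid(custom_config_rules):
        # Mirrors TaskExecutor.get_device_controller: scan custom rules for '_controller'.
        for rule in custom_config_rules:
            if rule.get('resource_key') != '_controller': continue
            return json.loads(rule['resource_value'])['uuid']
        return None  # device is not managed by a controller

    rules = [{'resource_key': '_controller', 'resource_value': json.dumps({'uuid': 'XR-CONSTELLATION'})}]
    assert find_controller_uuid(rules) == 'XR-CONSTELLATION'
    assert find_controller_uuid([]) is None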
diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
index 5a47005b304836050dd8c0882214dd9cebd5d8b5..4367ffdee4d6d5b9edfc9fd30d0d6b6f48da8a75 100644
--- a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
+++ b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
@@ -32,7 +32,7 @@ class Task_ConnectionConfigure(_Task):
     def connection_id(self) -> ConnectionId: return self._connection_id
 
     @staticmethod
-    def build_key(connection_id : ConnectionId) -> str:
+    def build_key(connection_id : ConnectionId) -> str: # pylint: disable=arguments-differ
         str_connection_id = get_connection_key(connection_id)
         return KEY_TEMPLATE.format(connection_id=str_connection_id)
 
diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
index 5736054febd2fb9e8a36b5a2235ca3f412e0e174..70f41566ef5e69605a527cc0392b77acb866ec2c 100644
--- a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
+++ b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
@@ -32,7 +32,7 @@ class Task_ConnectionDeconfigure(_Task):
     def connection_id(self) -> ConnectionId: return self._connection_id
 
     @staticmethod
-    def build_key(connection_id : ConnectionId) -> str:
+    def build_key(connection_id : ConnectionId) -> str: # pylint: disable=arguments-differ
         str_connection_id = get_connection_key(connection_id)
         return KEY_TEMPLATE.format(connection_id=str_connection_id)
 
diff --git a/src/service/service/task_scheduler/tasks/Task_ServiceDelete.py b/src/service/service/task_scheduler/tasks/Task_ServiceDelete.py
index 6a4e11b540cd9b85028d92cf86899ee098056c36..0f021b6ca65da1c6b5e44d8577bf9dd6875eb17a 100644
--- a/src/service/service/task_scheduler/tasks/Task_ServiceDelete.py
+++ b/src/service/service/task_scheduler/tasks/Task_ServiceDelete.py
@@ -28,7 +28,7 @@ class Task_ServiceDelete(_Task):
     def service_id(self) -> ServiceId: return self._service_id
 
     @staticmethod
-    def build_key(service_id : ServiceId) -> str:
+    def build_key(service_id : ServiceId) -> str:   # pylint: disable=arguments-differ
         str_service_id = get_service_key(service_id)
         return KEY_TEMPLATE.format(service_id=str_service_id)
 
diff --git a/src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py b/src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py
index 815cb33c3d540755704153b661e889fc2660d268..d5360fe85eae68085298406fc0ed19dd105f187e 100644
--- a/src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py
+++ b/src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py
@@ -32,7 +32,7 @@ class Task_ServiceSetStatus(_Task):
     def new_status(self) -> ServiceStatusEnum: return self._new_status
 
     @staticmethod
-    def build_key(service_id : ServiceId, new_status : ServiceStatusEnum) -> str:
+    def build_key(service_id : ServiceId, new_status : ServiceStatusEnum) -> str:   # pylint: disable=arguments-differ
         str_service_id = get_service_key(service_id)
         str_new_status = ServiceStatusEnum.Name(new_status)
         return KEY_TEMPLATE.format(service_id=str_service_id, new_status=str_new_status)
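The arguments-differ suppressions added in these task files are needed because each task specializes the static build_key with its own parameter list, which pylint flags against the base class signature. A minimal sketch of the pattern, with hypothetical names:

    class _Task:
        @staticmethod
        def build_key() -> str:
            raise NotImplementedError

    class Task_Example(_Task):
        @staticmethod
        def build_key(item_id : str) -> str:    # pylint: disable=arguments-differ
            return 'example({:s})'.format(item_id)

    assert Task_Example.build_key('A') == 'example(A)'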
diff --git a/src/service/service/tools/ContextGetters.py b/src/service/service/tools/ContextGetters.py
deleted file mode 100644
index 9b1d6224d1e4201cbc0720e7ce818a86e5ae2042..0000000000000000000000000000000000000000
--- a/src/service/service/tools/ContextGetters.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import grpc
-from typing import Optional
-from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId
-from context.client.ContextClient import ContextClient
-
-def get_connection(context_client : ContextClient, connection_id : ConnectionId) -> Optional[Connection]:
-    try:
-        connection : Connection = context_client.GetConnection(connection_id)
-        return connection
-    except grpc.RpcError as e:
-        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
-        return None
-
-def get_device(context_client : ContextClient, device_id : DeviceId) -> Optional[Device]:
-    try:
-        device : Device = context_client.GetDevice(device_id)
-        return device
-    except grpc.RpcError as e:
-        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
-        return None
-
-def get_service(context_client : ContextClient, service_id : ServiceId) -> Optional[Service]:
-    try:
-        service : Service = context_client.GetService(service_id)
-        return service
-    except grpc.RpcError as e:
-        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
-        return None
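The helpers deleted above are superseded by the shared common.tools.context_queries modules, as the TaskExecutor changes earlier in this diff show. A sketch of the call-site migration, assuming the signatures used in this diff (note that get_device now takes the uuid string rather than the DeviceId message):

    from common.tools.context_queries.Connection import get_connection_by_id
    from common.tools.context_queries.Device import get_device
    from common.tools.context_queries.Service import get_service_by_id

    def load_objects(context_client, connection_id, device_id, service_id):
        # Each shared getter returns the requested object, or None if it does not exist.
        connection = get_connection_by_id(context_client, connection_id)
        device     = get_device(context_client, device_id.device_uuid.uuid)
        service    = get_service_by_id(context_client, service_id)
        return connection, device, service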
diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py
index acec3ae303266714ae7f50c5c0d78fc41d350ea1..f91c55e281e8ed5f994dea3dce43a63184669795 100644
--- a/src/slice/service/SliceServiceServicerImpl.py
+++ b/src/slice/service/SliceServiceServicerImpl.py
@@ -19,7 +19,7 @@ from common.proto.context_pb2 import (
 from common.proto.slice_pb2_grpc import SliceServiceServicer
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 from common.tools.context_queries.InterDomain import is_multi_domain
-from common.tools.context_queries.Slice import get_slice
+from common.tools.context_queries.Slice import get_slice_by_id
 from common.tools.grpc.ConfigRules import copy_config_rules
 from common.tools.grpc.Constraints import copy_constraints
 from common.tools.grpc.EndPointIds import copy_endpoint_ids
@@ -44,9 +44,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         # Set slice status to "SERVICESTATUS_PLANNED" to ensure rest of components are aware the slice is
         # being modified.
         context_client = ContextClient()
-        slice_ro : Optional[Service] = get_slice(
-            context_client, request.slice_id.slice_uuid.uuid, request.slice_id.context_id.context_uuid.uuid,
-            rw_copy=False)
+        slice_ro : Optional[Slice] = get_slice_by_id(context_client, request.slice_id, rw_copy=False)
 
         slice_rw = Slice()
         slice_rw.CopyFrom(request if slice_ro is None else slice_ro)
diff --git a/src/slice/service/slice_grouper/SliceGrouper.py b/src/slice/service/slice_grouper/SliceGrouper.py
index 735d028993eb11e83138caebde1e32ebc830093f..2f1a791819f6a8d0951e9e93ca22d071ea66c1f7 100644
--- a/src/slice/service/slice_grouper/SliceGrouper.py
+++ b/src/slice/service/slice_grouper/SliceGrouper.py
@@ -29,6 +29,7 @@ class SliceGrouper:
     def __init__(self) -> None:
         self._lock = threading.Lock()
         self._is_enabled = is_slice_grouping_enabled()
+        LOGGER.info('Slice Grouping: {:s}'.format('ENABLED' if self._is_enabled else 'DISABLED'))
         if not self._is_enabled: return
 
         metrics_exporter = MetricsExporter()
diff --git a/src/slice/service/slice_grouper/Tools.py b/src/slice/service/slice_grouper/Tools.py
index ca957f3c7760eb65b649d22ecb5b57dee3e08dab..c815a19d5477ec82c2c2702ba58bb5b092144692 100644
--- a/src/slice/service/slice_grouper/Tools.py
+++ b/src/slice/service/slice_grouper/Tools.py
@@ -18,7 +18,7 @@ from common.Settings import get_setting
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.proto.context_pb2 import IsolationLevelEnum, Slice, SliceId, SliceStatusEnum
 from common.tools.context_queries.Context import create_context
-from common.tools.context_queries.Slice import get_slice
+from common.tools.context_queries.Slice import get_slice_by_uuid
 from context.client.ContextClient import ContextClient
 from slice.service.slice_grouper.MetricsExporter import MetricsExporter
 
@@ -70,7 +70,7 @@ def create_slice_groups(
     slice_group_ids : Dict[str, SliceId] = dict()
     for slice_group in slice_groups:
         slice_group_name = slice_group[0]
-        slice_group_obj = get_slice(context_client, slice_group_name, DEFAULT_CONTEXT_NAME)
+        slice_group_obj = get_slice_by_uuid(context_client, slice_group_name, DEFAULT_CONTEXT_NAME)
         if slice_group_obj is None:
             slice_group_obj = create_slice_group(
                 DEFAULT_CONTEXT_NAME, slice_group_name, slice_group[2], slice_group[1])
@@ -111,7 +111,7 @@ def add_slice_to_group(slice_obj : Slice, selected_group : Tuple[str, float, flo
     slice_uuid = slice_obj.slice_id.slice_uuid.uuid
 
     context_client = ContextClient()
-    slice_group_obj = get_slice(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True)
+    slice_group_obj = get_slice_by_uuid(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True)
     if slice_group_obj is None:
         raise NotFoundException('Slice', group_name, extra_details='while adding to group')
 
@@ -148,7 +148,7 @@ def remove_slice_from_group(slice_obj : Slice, selected_group : Tuple[str, float
     slice_uuid = slice_obj.slice_id.slice_uuid.uuid
 
     context_client = ContextClient()
-    slice_group_obj = get_slice(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True)
+    slice_group_obj = get_slice_by_uuid(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True)
     if slice_group_obj is None:
         raise NotFoundException('Slice', group_name, extra_details='while removing from group')
 
diff --git a/src/tests/ofc22/descriptors_emulated.json b/src/tests/ofc22/descriptors_emulated.json
index aa76edecd116ee7336fc1a2621d2bc3ae95080ce..b68b9636d58d9c80c4774e4ade557f83796ac5b5 100644
--- a/src/tests/ofc22/descriptors_emulated.json
+++ b/src/tests/ofc22/descriptors_emulated.json
@@ -97,6 +97,35 @@
                 {"device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}},
                 {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
             ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R1-EMU/13/0/0/aade6001-f00b-5e2f-a357-6a0a9d3de870"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}},
+                {"device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R2-EMU/13/0/0/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}},
+                {"device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R3-EMU/13/0/0/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}},
+                {"device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R4-EMU/13/0/0/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}},
+                {"device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
         }
     ]
 }
\ No newline at end of file
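The four entries added above are the reverse directions of the four existing router-to-OLS links; links are unidirectional, so each direction needs its own descriptor. A sketch, following the naming convention visible in this file ('<dev-B>==<dev-A>/<ep-A>/<ep-B>'), of deriving a reverse link from a forward one:

    def reverse_link(link):
        # Swap the two endpoints and rebuild the uuid as '<dev-B>==<dev-A>/<ep-A>/<ep-B>'.
        ep_a, ep_b = link['link_endpoint_ids']
        uuid = '{:s}=={:s}/{:s}/{:s}'.format(
            ep_b['device_id']['device_uuid']['uuid'], ep_a['device_id']['device_uuid']['uuid'],
            ep_a['endpoint_uuid']['uuid'], ep_b['endpoint_uuid']['uuid'])
        return {'link_id': {'link_uuid': {'uuid': uuid}}, 'link_endpoint_ids': [ep_b, ep_a]}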
diff --git a/src/tests/ofc23/.gitignore b/src/tests/ofc23/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0a3f4400d5c88b1af32c7667d69d2fdc12d5424e
--- /dev/null
+++ b/src/tests/ofc23/.gitignore
@@ -0,0 +1,2 @@
+# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc.
+descriptors_real.json
diff --git a/src/tests/ofc23/MultiIngressController.txt b/src/tests/ofc23/MultiIngressController.txt
new file mode 100644
index 0000000000000000000000000000000000000000..190e6df7425983db43d8b1888f29861ec9056ed6
--- /dev/null
+++ b/src/tests/ofc23/MultiIngressController.txt
@@ -0,0 +1,25 @@
+# Ref: https://kubernetes.github.io/ingress-nginx/user-guide/multiple-ingress/
+# Ref: https://fabianlee.org/2021/07/29/kubernetes-microk8s-with-multiple-metallb-endpoints-and-nginx-ingress-controllers/
+
+# Check node limits
+kubectl describe nodes
+
+# Create secondary ingress controllers
+kubectl apply -f ofc23/nginx-ingress-controller-parent.yaml
+kubectl apply -f ofc23/nginx-ingress-controller-child.yaml
+
+# Delete secondary ingress controllers
+kubectl delete -f ofc23/nginx-ingress-controller-parent.yaml
+kubectl delete -f ofc23/nginx-ingress-controller-child.yaml
+
+# Deploy TFS for Parent
+source ofc23/deploy_specs_parent.sh
+./deploy/all.sh
+
+# Deploy TFS for Child
+source ofc23/deploy_specs_child.sh
+./deploy/all.sh
+
+# Manually deploy ingresses for instances
+kubectl --namespace tfs-parent apply -f ofc23/tfs-ingress-parent.yaml
+kubectl --namespace tfs-child apply -f ofc23/tfs-ingress-child.yaml
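After both instances are up, it may help to verify that each ingress was created and that both ingress controllers are running; a sketch using standard kubectl commands, with the namespaces configured above:

    # List the ingresses created for each TFS instance
    kubectl --namespace tfs-parent get ingress
    kubectl --namespace tfs-child get ingress

    # Confirm both nginx ingress controller deployments are running
    kubectl get pods --all-namespaces | grep -i nginx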
diff --git a/src/tests/ofc23/__init__.py b/src/tests/ofc23/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/tests/ofc23/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/ofc23/delete_hierar.sh b/src/tests/ofc23/delete_hierar.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4a03dad1cc29cff72347f68bc7b1a082924a9211
--- /dev/null
+++ b/src/tests/ofc23/delete_hierar.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Delete old namespaces
+kubectl delete namespace tfs-parent tfs-child
+
+# Delete secondary ingress controllers
+kubectl delete -f ofc23/nginx-ingress-controller-parent.yaml
+kubectl delete -f ofc23/nginx-ingress-controller-child.yaml
diff --git a/src/tests/ofc23/delete_sligrp.sh b/src/tests/ofc23/delete_sligrp.sh
new file mode 100755
index 0000000000000000000000000000000000000000..cce0bd53febc4765f9d455619f49ea4de8dfe870
--- /dev/null
+++ b/src/tests/ofc23/delete_sligrp.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Delete old namespaces
+kubectl delete namespace tfs
diff --git a/src/tests/ofc23/deploy_child.sh b/src/tests/ofc23/deploy_child.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9b05ed88739114bf9029d8afaf491d7fec726bff
--- /dev/null
+++ b/src/tests/ofc23/deploy_child.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Delete old namespaces
+kubectl delete namespace tfs-child
+
+# Delete secondary ingress controllers
+kubectl delete -f ofc23/nginx-ingress-controller-child.yaml
+
+# Create secondary ingress controllers
+kubectl apply -f ofc23/nginx-ingress-controller-child.yaml
+
+# Deploy TFS for Child
+source ofc23/deploy_specs_child.sh
+./deploy/all.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_child.sh
diff --git a/src/tests/ofc23/deploy_hierar.sh b/src/tests/ofc23/deploy_hierar.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4874688ad7561156b1b5fc4c80b72a9745feb6a0
--- /dev/null
+++ b/src/tests/ofc23/deploy_hierar.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Delete old namespaces
+kubectl delete namespace tfs-parent tfs-child
+
+# Delete secondary ingress controllers
+kubectl delete -f ofc23/nginx-ingress-controller-parent.yaml
+kubectl delete -f ofc23/nginx-ingress-controller-child.yaml
+
+# Create secondary ingress controllers
+kubectl apply -f ofc23/nginx-ingress-controller-parent.yaml
+kubectl apply -f ofc23/nginx-ingress-controller-child.yaml
+
+# Deploy TFS for Parent
+source ofc23/deploy_specs_parent.sh
+./deploy/all.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_parent.sh
+
+# Deploy TFS for Child
+source ofc23/deploy_specs_child.sh
+./deploy/all.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_child.sh
diff --git a/src/tests/ofc23/deploy_parent.sh b/src/tests/ofc23/deploy_parent.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ac4a2954213cf577efe1b1a8e499635d80ea3548
--- /dev/null
+++ b/src/tests/ofc23/deploy_parent.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Delete old namespaces
+kubectl delete namespace tfs-parent
+
+# Delete secondary ingress controllers
+kubectl delete -f ofc23/nginx-ingress-controller-parent.yaml
+
+# Create secondary ingress controllers
+kubectl apply -f ofc23/nginx-ingress-controller-parent.yaml
+
+# Deploy TFS for Parent
+source ofc23/deploy_specs_parent.sh
+./deploy/all.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_parent.sh
diff --git a/src/tests/ofc23/deploy_sligrp.sh b/src/tests/ofc23/deploy_sligrp.sh
new file mode 100755
index 0000000000000000000000000000000000000000..62a9df5cf006af856f168add4058d63eaa905784
--- /dev/null
+++ b/src/tests/ofc23/deploy_sligrp.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Delete old namespaces
+kubectl delete namespace tfs-sligrp
+
+# Deploy TFS for Slice Grouping
+source ofc23/deploy_specs_sligrp.sh
+./deploy/all.sh
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_sligrp.sh
diff --git a/src/tests/ofc23/deploy_specs_child.sh b/src/tests/ofc23/deploy_specs_child.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4d2b3502294925d82f675263fd6bddea62ec181a
--- /dev/null
+++ b/src/tests/ofc23/deploy_specs_child.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, that you want to build images for and deploy.
+# Optional extras: automation monitoring load_generator
+export TFS_COMPONENTS="context device pathcomp service slice compute webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE="tfs-child"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="ofc23/tfs-ingress-child.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Enable skip-build flag to reuse the existing Docker images; set to "" to rebuild them.
+export TFS_SKIP_BUILD="YES"
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port the CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port the CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_child"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping the database if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-child"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4224"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8224"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-child"
+
+# Set the external port the QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8814"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9012"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9002"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping the tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
diff --git a/src/tests/ofc23/deploy_specs_parent.sh b/src/tests/ofc23/deploy_specs_parent.sh
new file mode 100755
index 0000000000000000000000000000000000000000..808f4e28734be71e6eb7fb2aced39211fd8e7f24
--- /dev/null
+++ b/src/tests/ofc23/deploy_specs_parent.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, that you want to build images for and deploy.
+# Optional extras: automation monitoring load_generator
+export TFS_COMPONENTS="context device pathcomp service slice compute webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE="tfs-parent"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="ofc23/tfs-ingress-parent.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port the CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port the CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_parent"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping the database if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-parent"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4223"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8223"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-parent"
+
+# Set the external port the QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8813"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9011"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9001"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping the tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
diff --git a/src/tests/ofc23/deploy_specs_sligrp.sh b/src/tests/ofc23/deploy_specs_sligrp.sh
new file mode 100755
index 0000000000000000000000000000000000000000..90bea4567bd35d845abf943670f8aa33070dff57
--- /dev/null
+++ b/src/tests/ofc23/deploy_specs_sligrp.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, that you want to build images for and deploy.
+# Optional extras: automation monitoring compute
+export TFS_COMPONENTS="context device pathcomp service slice webui load_generator"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE="tfs-sligrp"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port the CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port the CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_sligrp"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping the database if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-sligrp"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4222"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8222"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-sligrp"
+
+# Set the external port the QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8812"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9010"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9000"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping the tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
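The three spec files above deliberately stagger the externally exposed NATS and QuestDB ports per instance (NATS client 4222-4224, NATS HTTP 8222-8224, QuestDB SQL 8812-8814, ILP 9010-9012, HTTP 9000-9002) while sharing a single CockroachDB instance in namespace crdb. A quick sketch to eyeball the layout side by side:

    grep -HE '^export (NATS|QDB|CRDB)_EXT_PORT' src/tests/ofc23/deploy_specs_*.sh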
diff --git a/src/tests/ofc23/descriptors/adva-interfaces.txt b/src/tests/ofc23/descriptors/adva-interfaces.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a634735058aa9490ddbd43e8e9a0752fbd4a6ee6
--- /dev/null
+++ b/src/tests/ofc23/descriptors/adva-interfaces.txt
@@ -0,0 +1,89 @@
+R199
+eth-1/0/1
+eth-1/0/10
+eth-1/0/11
+eth-1/0/12
+eth-1/0/13
+eth-1/0/14
+eth-1/0/15
+eth-1/0/16
+eth-1/0/17
+eth-1/0/18
+eth-1/0/19
+eth-1/0/2
+eth-1/0/20
+eth-1/0/21
+eth-1/0/22
+eth-1/0/23
+eth-1/0/24
+eth-1/0/25
+eth-1/0/26
+eth-1/0/27
+eth-1/0/28
+eth-1/0/29
+eth-1/0/3
+eth-1/0/30
+eth-1/0/4
+eth-1/0/5
+eth-1/0/6
+eth-1/0/7
+eth-1/0/8
+eth-1/0/9
+
+R155
+eth-1/0/1
+eth-1/0/10
+eth-1/0/11
+eth-1/0/12
+eth-1/0/13
+eth-1/0/14
+eth-1/0/15
+eth-1/0/16
+eth-1/0/17
+eth-1/0/18
+eth-1/0/19
+eth-1/0/2
+eth-1/0/20
+eth-1/0/21
+eth-1/0/22
+eth-1/0/23
+eth-1/0/24
+eth-1/0/25
+eth-1/0/26
+eth-1/0/27
+eth-1/0/3
+eth-1/0/4
+eth-1/0/5
+eth-1/0/6
+eth-1/0/7
+eth-1/0/8
+eth-1/0/9
+
+R149
+eth-1/0/1
+eth-1/0/10
+eth-1/0/11
+eth-1/0/12
+eth-1/0/13
+eth-1/0/14
+eth-1/0/15
+eth-1/0/16
+eth-1/0/17
+eth-1/0/18
+eth-1/0/19
+eth-1/0/2
+eth-1/0/20
+eth-1/0/21
+eth-1/0/22
+eth-1/0/23
+eth-1/0/24
+eth-1/0/25
+eth-1/0/26
+eth-1/0/27
+eth-1/0/3
+eth-1/0/4
+eth-1/0/5
+eth-1/0/6
+eth-1/0/7
+eth-1/0/8
+eth-1/0/9
diff --git a/src/tests/ofc23/descriptors/backup/dc-2-dc-service.json b/src/tests/ofc23/descriptors/backup/dc-2-dc-service.json
new file mode 100644
index 0000000000000000000000000000000000000000..3a83afa6de81f137204aecc5f0eca476aad71e61
--- /dev/null
+++ b/src/tests/ofc23/descriptors/backup/dc-2-dc-service.json
@@ -0,0 +1,37 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "dc-2-dc-svc"}
+            },
+            "service_type": 2,
+            "service_status": {"service_status": 1},
+            "service_endpoint_ids": [
+                {"device_id":{"device_uuid":{"uuid":"DC1"}},"endpoint_uuid":{"uuid":"int"}},
+                {"device_id":{"device_uuid":{"uuid":"DC2"}},"endpoint_uuid":{"uuid":"int"}}
+            ],
+            "service_constraints": [
+                {"sla_capacity": {"capacity_gbps": 10.0}},
+                {"sla_latency": {"e2e_latency_ms": 15.2}}
+            ],
+            "service_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {
+                    "address_families": ["IPV4"], "bgp_as": 65000, "bgp_route_target": "65000:123",
+                    "mtu": 1512, "vlan_id": 111
+                }}},
+                {"action": 1, "custom": {"resource_key": "/device[R149]/endpoint[eth-1/0/22]/settings", "resource_value": {
+                    "route_distinguisher": "65000:123", "router_id": "5.5.5.5",
+                    "address_ip": "172.16.4.1", "address_prefix": 24, "sub_interface_index": 0, "vlan_id": 111
+                }}},
+                {"action": 1, "custom": {"resource_key": "/device[R155]/endpoint[eth-1/0/22]/settings", "resource_value": {
+                    "route_distinguisher": "65000:123", "router_id": "5.5.5.1",
+                    "address_ip": "172.16.2.1", "address_prefix": 24, "sub_interface_index": 0, "vlan_id": 111
+                }}},
+                {"action": 1, "custom": {"resource_key": "/device[R199]/endpoint[eth-1/0/21]/settings", "resource_value": {
+                    "route_distinguisher": "65000:123", "router_id": "5.5.5.6",
+                    "address_ip": "172.16.1.1", "address_prefix": 24, "sub_interface_index": 0, "vlan_id": 111
+                }}}
+            ]}
+        }
+    ]
+}
diff --git a/src/tests/ofc23/descriptors/backup/descriptor_child.json b/src/tests/ofc23/descriptors/backup/descriptor_child.json
new file mode 100644
index 0000000000000000000000000000000000000000..eea9571531cfbebfcc53dba0679d1bd1b6900b2f
--- /dev/null
+++ b/src/tests/ofc23/descriptors/backup/descriptor_child.json
@@ -0,0 +1,183 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "R199"}}, "device_type": "packet-router", "device_drivers": [1],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.95.86.199"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "830"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin",
+                    "force_running": false, "hostkey_verify": false, "look_for_keys": false,
+                    "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "huaweiyang"},
+                    "manager_params": {"timeout" : 120},
+                    "endpoints": [
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/1"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/2"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/3"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/4"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/5"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/6"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/7"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/8"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/9"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/10"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/11"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/12"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/13"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/14"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/15"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/16"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/17"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/18"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/19"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/20"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/21"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/22"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/23"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/24"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/25"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/26"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/27"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/28"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/29"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/30"}
+                    ]
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R155"}}, "device_type": "packet-router", "device_drivers": [1],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.95.86.155"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "830"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin",
+                    "force_running": false, "hostkey_verify": false, "look_for_keys": false,
+                    "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "huaweiyang"},
+                    "manager_params": {"timeout" : 120},
+                    "endpoints": [
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/1"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/2"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/3"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/4"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/5"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/6"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/7"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/8"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/9"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/10"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/11"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/12"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/13"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/14"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/15"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/16"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/17"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/18"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/19"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/20"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/21"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/22"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/23"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/24"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/25"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/26"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/27"}
+                    ]
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R149"}}, "device_type": "packet-router", "device_drivers": [1],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.95.86.149"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "830"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin",
+                    "force_running": false, "hostkey_verify": false, "look_for_keys": false,
+                    "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "huaweiyang"},
+                    "manager_params": {"timeout" : 120},
+                    "endpoints": [
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/1"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/2"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/3"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/4"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/5"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/6"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/7"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/8"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/9"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/10"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/11"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/12"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/13"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/14"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/15"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/16"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/17"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/18"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/19"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/20"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/21"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/22"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/23"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/24"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/25"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/26"},
+                        {"sample_types": [], "type": "copper/internal", "uuid": "eth-1/0/27"}
+                    ]
+                }}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "R199/eth-1/0/19==R155/eth-1/0/19"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/19"}},
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/19"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R155/eth-1/0/19==R199/eth-1/0/19"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/19"}},
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/19"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R199/eth-1/0/20==R149/eth-1/0/20"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}},
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R149/eth-1/0/20==R199/eth-1/0/20"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}},
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R149/eth-1/0/25==R155/eth-1/0/25"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}},
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R155/eth-1/0/25==R149/eth-1/0/25"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}},
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}}
+            ]
+        }
+    ]
+}
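The `_connect/settings` blocks above (R199, R155, R149) carry NETCONF session parameters. As a rough orientation, here is a minimal sketch of how such settings would translate into an ncclient session -- assuming the NETCONF driver is ncclient-based, as the key names (`hostkey_verify`, `look_for_keys`, `allow_agent`, `device_params`) suggest; `force_running` and `commit_per_rule` are driver-level options rather than ncclient arguments:

```python
# Minimal sketch (assumption: an ncclient-backed NETCONF driver).
from ncclient import manager

session = manager.connect(
    host='10.95.86.149', port=830,         # _connect/address and _connect/port
    username='admin', password='admin',
    hostkey_verify=False, look_for_keys=False, allow_agent=False,
    device_params={'name': 'huaweiyang'},  # ncclient device handler
    timeout=120,                           # manager_params timeout
)
print('NETCONF session established:', session.session_id)
session.close_session()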
diff --git a/src/tests/ofc23/descriptors/backup/descriptor_parent.json b/src/tests/ofc23/descriptors/backup/descriptor_parent.json
new file mode 100644
index 0000000000000000000000000000000000000000..42b60e3cf09285955fbfbc567d977e60f78956be
--- /dev/null
+++ b/src/tests/ofc23/descriptors/backup/descriptor_parent.json
@@ -0,0 +1,258 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", "device_drivers": [7],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8002"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "scheme": "http", "username": "admin", "password": "admin"
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "MW"}}, "device_type": "microwave-radio-system", "device_drivers": [4, 5],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8443"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "nms5ux", "password": "nms5ux", "timeout": 120, "scheme": "https",
+                    "node_ids": ["192.168.27.139", "192.168.27.140"]
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "OLS"}}, "device_type": "open-line-system", "device_drivers": [2],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "cttc-ols.cttc-ols.svc.cluster.local"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "4900"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"timeout": 120}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "IPM"}}, "device_type": "xr-constellation", "device_drivers": [6],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8444"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "xr-user-1", "password": "xr-user-1", "hub_module_name": "OFC HUB 1",
+                    "consistency-mode": "lifecycle", "import_topology": "devices"
+                }}}
+            ]}
+        },
+
+
+        {
+            "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "device_type": "emu-optical-splitter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "optical/internal", "uuid": "common"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf1"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf2"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf3"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "DC1/eth1==R149/eth-1/0/22"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R149/eth-1/0/22==DC1/eth1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}},
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R149/eth-1/0/9==MW/192.168.27.140:5"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/9"}},
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.140:5"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "MW/192.168.27.140:5==R149/eth-1/0/9"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.140:5"}},
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/9"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "MW/192.168.27.139:5==OFC HUB 1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.139:5"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC HUB 1/1/1==MW/192.168.27.139:5"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.139:5"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC HUB 1/XR-T1==Optical-Splitter/common"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "common"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/common==OFC HUB 1/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "common"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/leaf1==OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/79516f5e-55a0-5671-977a-1f5cc934e700==Optical-Splitter/leaf1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "79516f5e-55a0-5671-977a-1f5cc934e700"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/leaf2==OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf2"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/30d9323e-b916-51ce-a9a8-cf88f62eb77f==Optical-Splitter/leaf2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "30d9323e-b916-51ce-a9a8-cf88f62eb77f"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 1/XR-T1==OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/68ac012e-54d4-5846-b5dc-6ec356404f90==OFC LEAF 1/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "68ac012e-54d4-5846-b5dc-6ec356404f90"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 2/XR-T1==OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/367b19b1-3172-54d8-bdd4-12d3ac5604f6==OFC LEAF 2/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "367b19b1-3172-54d8-bdd4-12d3ac5604f6"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 1/1/1==R155/eth-1/0/25"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R155/eth-1/0/25==OFC LEAF 1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 2/1/1==R199/eth-1/0/20"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R199/eth-1/0/20==OFC LEAF 2/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R155/eth-1/0/22==DC2/eth1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}},
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "DC2/eth1==R155/eth-1/0/22"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R199/eth-1/0/21==DC2/eth2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/21"}},
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "DC2/eth2==R199/eth-1/0/21"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth2"}},
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/21"}}
+            ]
+        }
+    ]
+}
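Note that this parent descriptor declares only the controller-level and emulated devices (TFS-IP, MW, OLS, IPM, DC1/DC2, Optical-Splitter), while its links also reference devices that are expected to be contributed at runtime by the underlying controllers (e.g., via the TFS-IP device and the IPM `import_topology` setting). A small sketch, assuming the path in this checkout, that lists which devices the descriptor expects to be provided externally:

```python
# Sketch: devices referenced by links but not declared in this descriptor;
# these should come from the underlying (child) controllers.
import json

with open('src/tests/ofc23/descriptors/backup/descriptor_parent.json') as f:
    parent = json.load(f)

declared = {dev['device_id']['device_uuid']['uuid'] for dev in parent['devices']}
referenced = {ep['device_id']['device_uuid']['uuid']
              for link in parent['links'] for ep in link['link_endpoint_ids']}

print('externally provided devices:', sorted(referenced - declared))
# Expected to include R149, R155, R199 and the OFC HUB/LEAF modules.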
diff --git a/src/tests/ofc23/descriptors/emulated/dc-2-dc-service.json b/src/tests/ofc23/descriptors/emulated/dc-2-dc-service.json
new file mode 100644
index 0000000000000000000000000000000000000000..7c3be015d0965d4bdaed8e225e79da072a7de6f3
--- /dev/null
+++ b/src/tests/ofc23/descriptors/emulated/dc-2-dc-service.json
@@ -0,0 +1,41 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "dc-2-dc-svc"}
+            },
+            "service_type": 2,
+            "service_status": {"service_status": 1},
+            "service_endpoint_ids": [
+                {"device_id":{"device_uuid":{"uuid":"DC1"}},"endpoint_uuid":{"uuid":"int"}},
+                {"device_id":{"device_uuid":{"uuid":"DC2"}},"endpoint_uuid":{"uuid":"int"}}
+            ],
+            "service_constraints": [
+                {"sla_capacity": {"capacity_gbps": 10.0}},
+                {"sla_latency": {"e2e_latency_ms": 15.2}}
+            ],
+            "service_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {
+                    "address_families": ["IPV4"], "bgp_as": 65000, "bgp_route_target": "65000:123",
+                    "mtu": 1512, "vlan_id": 300
+                }}},
+                {"action": 1, "custom": {"resource_key": "/device[PE1]/endpoint[1/1]/settings", "resource_value": {
+                    "route_distinguisher": "65000:123", "router_id": "10.0.0.1",
+                    "address_ip": "3.3.1.1", "address_prefix": 24, "sub_interface_index": 1, "vlan_id": 300
+                }}},
+                {"action": 1, "custom": {"resource_key": "/device[PE2]/endpoint[1/1]/settings", "resource_value": {
+                    "route_distinguisher": "65000:123", "router_id": "10.0.0.2",
+                    "address_ip": "3.3.2.1", "address_prefix": 24, "sub_interface_index": 1, "vlan_id": 300
+                }}},
+                {"action": 1, "custom": {"resource_key": "/device[PE3]/endpoint[1/1]/settings", "resource_value": {
+                    "route_distinguisher": "65000:123", "router_id": "10.0.0.3",
+                    "address_ip": "3.3.3.1", "address_prefix": 24, "sub_interface_index": 1, "vlan_id": 300
+                }}},
+                {"action": 1, "custom": {"resource_key": "/device[PE4]/endpoint[1/1]/settings", "resource_value": {
+                    "route_distinguisher": "65000:123", "router_id": "10.0.0.4",
+                    "address_ip": "3.3.4.1", "address_prefix": 24, "sub_interface_index": 1, "vlan_id": 300
+                }}}
+            ]}
+        }
+    ]
+}
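The service descriptor above requests a connectivity service between the `int` endpoints of DC1 and DC2, bounded by an SLA of 10 Gbps capacity and 15.2 ms end-to-end latency, and pre-seeds per-PE settings (route distinguisher, router ID, interface addressing, VLAN 300). A short sketch, assuming this checkout's path, that prints the requested SLA and the per-device settings keys:

```python
# Sketch: summarize the dc-2-dc service request defined above.
import json

with open('src/tests/ofc23/descriptors/emulated/dc-2-dc-service.json') as f:
    descriptor = json.load(f)

for service in descriptor['services']:
    name = service['service_id']['service_uuid']['uuid']
    print(f"service {name}: type={service['service_type']}")
    for constraint in service['service_constraints']:
        print(f"  SLA: {constraint}")
    for rule in service['service_config']['config_rules']:
        print(f"  rule: {rule['custom']['resource_key']}")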
diff --git a/src/tests/ofc23/descriptors/emulated/descriptor_child.json b/src/tests/ofc23/descriptors/emulated/descriptor_child.json
new file mode 100644
index 0000000000000000000000000000000000000000..1dc6fd35531db1989b9b85c846b6fc8d0524f08f
--- /dev/null
+++ b/src/tests/ofc23/descriptors/emulated/descriptor_child.json
@@ -0,0 +1,149 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "PE1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/3"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "PE2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/3"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "PE3"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/3"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "PE4"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/3"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "2/4"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE1/2/2==PE2/2/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "2/2"}},
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "2/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE1/2/3==PE3/2/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "2/3"}},
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "2/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE1/2/4==PE4/2/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "2/4"}},
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "2/1"}}
+            ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE2/2/1==PE1/2/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "2/1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "2/2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE2/2/3==PE3/2/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "2/3"}},
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "2/2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE2/2/4==PE4/2/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "2/4"}},
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "2/2"}}
+            ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE3/2/1==PE1/2/3"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "2/1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "2/3"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE3/2/2==PE2/2/3"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "2/2"}},
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "2/3"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE3/2/4==PE4/2/3"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "2/4"}},
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "2/3"}}
+            ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE4/2/1==PE1/2/4"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "2/1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "2/4"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE4/2/2==PE2/2/4"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "2/2"}},
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "2/4"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE4/2/3==PE3/2/4"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "2/3"}},
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "2/4"}}
+            ]
+        }
+
+    ]
+}
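Each adjacency in these topologies is modeled as a pair of unidirectional links (`A/x==B/y` and `B/y==A/x`), which makes symmetry easy to verify mechanically. A quick consistency sketch, assuming this checkout's path, that flags any link missing its reverse twin:

```python
# Sketch: verify every link has a reverse twin (A/x==B/y and B/y==A/x).
import json

with open('src/tests/ofc23/descriptors/emulated/descriptor_child.json') as f:
    topology = json.load(f)

def endpoint_key(ep):
    return (ep['device_id']['device_uuid']['uuid'], ep['endpoint_uuid']['uuid'])

links = {tuple(endpoint_key(ep) for ep in link['link_endpoint_ids'])
         for link in topology['links']}

for a, b in sorted(links):
    if (b, a) not in links:
        print(f'missing reverse for {a}=={b}')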
diff --git a/src/tests/ofc23/descriptors/emulated/descriptor_parent.json b/src/tests/ofc23/descriptors/emulated/descriptor_parent.json
new file mode 100644
index 0000000000000000000000000000000000000000..1b1f5dbfd57b2e1543e86ba8d2633a0e944fced5
--- /dev/null
+++ b/src/tests/ofc23/descriptors/emulated/descriptor_parent.json
@@ -0,0 +1,258 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", "device_drivers": [7],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8002"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "scheme": "http", "username": "admin", "password": "admin"
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "MW"}}, "device_type": "microwave-radio-system", "device_drivers": [4, 5],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8443"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin", "timeout": 120, "scheme": "https",
+                    "node_ids": ["192.168.27.139", "192.168.27.140"]
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "OLS"}}, "device_type": "open-line-system", "device_drivers": [2],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "cttc-ols.cttc-ols.svc.cluster.local"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "4900"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"timeout": 120}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "IPM"}}, "device_type": "xr-constellation", "device_drivers": [6],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8444"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "xr-user-1", "password": "xr-user-1", "hub_module_name": "OFC HUB 1",
+                    "consistency-mode": "lifecycle", "import_topology": "devices"
+                }}}
+            ]}
+        },
+
+
+        {
+            "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "device_type": "emu-optical-splitter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "optical/internal", "uuid": "common"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf1"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf2"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf3"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "DC1/eth1==R149/eth-1/0/22"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R149/eth-1/0/22==DC1/eth1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}},
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R149/eth-1/0/9==MW/192.168.27.140:5"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/9"}},
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.140:5"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "MW/192.168.27.140:5==R149/eth-1/0/9"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.140:5"}},
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/9"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "MW/192.168.27.139:5==OFC HUB 1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.139:5"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC HUB 1/1/1==MW/192.168.27.139:5"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.139:5"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC HUB 1/XR-T1==Optical-Splitter/common"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "common"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/common==OFC HUB 1/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "common"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/leaf1==OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/79516f5e-55a0-5671-977a-1f5cc934e700==Optical-Splitter/leaf1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "79516f5e-55a0-5671-977a-1f5cc934e700"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/leaf2==OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf2"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/30d9323e-b916-51ce-a9a8-cf88f62eb77f==Optical-Splitter/leaf2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "30d9323e-b916-51ce-a9a8-cf88f62eb77f"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 1/XR-T1==OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/68ac012e-54d4-5846-b5dc-6ec356404f90==OFC LEAF 1/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "68ac012e-54d4-5846-b5dc-6ec356404f90"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 2/XR-T1==OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/367b19b1-3172-54d8-bdd4-12d3ac5604f6==OFC LEAF 2/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "367b19b1-3172-54d8-bdd4-12d3ac5604f6"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 1/1/1==R155/eth-1/0/25"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R155/eth-1/0/25==OFC LEAF 1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 2/1/1==R199/eth-1/0/20"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R199/eth-1/0/20==OFC LEAF 2/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R155/eth-1/0/22==DC2/eth1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}},
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "DC2/eth1==R155/eth-1/0/22"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R199/eth-1/0/21==DC2/eth2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/21"}},
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "DC2/eth2==R199/eth-1/0/21"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth2"}},
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/21"}}
+            ]
+        }
+    ]
+}
diff --git a/src/tests/ofc23/descriptors/emulated/descriptor_parent_noxr.json b/src/tests/ofc23/descriptors/emulated/descriptor_parent_noxr.json
new file mode 100644
index 0000000000000000000000000000000000000000..c4a6646ede081fc2f6ee449d7771de3dbcbd77ec
--- /dev/null
+++ b/src/tests/ofc23/descriptors/emulated/descriptor_parent_noxr.json
@@ -0,0 +1,332 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", "device_drivers": [7],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8002"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "scheme": "http", "username": "admin", "password": "admin"
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "MW1-2"}}, "device_type": "microwave-radio-system", "device_drivers": [5],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8443"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin", "timeout": 120, "scheme": "https",
+                    "node_ids": ["172.18.0.1", "172.18.0.2"]
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "MW3-4"}}, "device_type": "microwave-radio-system", "device_drivers": [5],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8443"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin", "timeout": 120, "scheme": "https",
+                    "node_ids": ["172.18.0.3", "172.18.0.4"]
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "OLS"}}, "device_type": "open-line-system", "device_drivers": [2],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "cttc-ols.cttc-ols.svc.cluster.local"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "4900"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"timeout": 120}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R1"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/3"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "device_type": "emu-optical-splitter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "optical/internal", "uuid": "common"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf1"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf2"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf3"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R2"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/2"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R3"}}, "device_type": "emu-packet-router", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "1/2"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "DC1/eth1==PE1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE1/1/1==DC1/eth1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "DC1/eth2==PE2/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth2"}},
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE2/1/1==DC1/eth2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE1/1/2==MW1-2/172.18.0.1:1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "MW1-2"}}, "endpoint_uuid": {"uuid": "172.18.0.1:1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "MW1-2/172.18.0.1:1==PE1/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW1-2"}}, "endpoint_uuid": {"uuid": "172.18.0.1:1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "MW1-2/172.18.0.2:1==R1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW1-2"}}, "endpoint_uuid": {"uuid": "172.18.0.2:1"}},
+                {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R1/1/1==MW1-2/172.18.0.2:1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "MW1-2"}}, "endpoint_uuid": {"uuid": "172.18.0.2:1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE2/1/2==MW3-4/172.18.0.3:1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "MW3-4"}}, "endpoint_uuid": {"uuid": "172.18.0.3:1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "MW3-4/172.18.0.3:1==PE2/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW3-4"}}, "endpoint_uuid": {"uuid": "172.18.0.3:1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "MW3-4/172.18.0.4:1==R1/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW3-4"}}, "endpoint_uuid": {"uuid": "172.18.0.4:1"}},
+                {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R1/1/2==MW3-4/172.18.0.4:1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "MW3-4"}}, "endpoint_uuid": {"uuid": "172.18.0.4:1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R1/1/3==Optical-Splitter/common"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "1/3"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "common"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/common==R1/1/3"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "common"}},
+                {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "1/3"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/leaf1==OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/79516f5e-55a0-5671-977a-1f5cc934e700==Optical-Splitter/leaf1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "79516f5e-55a0-5671-977a-1f5cc934e700"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/leaf2==OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf2"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/30d9323e-b916-51ce-a9a8-cf88f62eb77f==Optical-Splitter/leaf2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "30d9323e-b916-51ce-a9a8-cf88f62eb77f"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R2/1/1==OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/68ac012e-54d4-5846-b5dc-6ec356404f90==R2/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "68ac012e-54d4-5846-b5dc-6ec356404f90"}},
+                {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R3/1/1==OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/367b19b1-3172-54d8-bdd4-12d3ac5604f6==R3/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "367b19b1-3172-54d8-bdd4-12d3ac5604f6"}},
+                {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R2/1/2==PE3/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE3/1/2==R2/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R3/1/2==PE4/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE4/1/2==R3/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE3/1/1==DC2/eth1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "DC2/eth1==PE3/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE4/1/1==DC2/eth2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "DC2/eth2==PE4/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth2"}},
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        }
+    ]
+}
diff --git a/src/tests/ofc23/descriptors/emulated/ipm-ctrl.json b/src/tests/ofc23/descriptors/emulated/ipm-ctrl.json
new file mode 100644
index 0000000000000000000000000000000000000000..91e9de611dac2627525bb11f81755ea651887e74
--- /dev/null
+++ b/src/tests/ofc23/descriptors/emulated/ipm-ctrl.json
@@ -0,0 +1,25 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "XR-CONSTELLATION"}},
+            "device_type": "xr-constellation",
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8444"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "xr-user-1", "password": "xr-user-1", "hub_module_name": "OFC HUB 1",
+                    "consistency-mode": "lifecycle"
+                }}}
+            ]},
+            "device_operational_status": 1,
+            "device_drivers": [6],
+            "device_endpoints": []
+        }
+    ]
+}
diff --git a/src/tests/ofc23/descriptors/emulated/old/descriptor_parent.json b/src/tests/ofc23/descriptors/emulated/old/descriptor_parent.json
new file mode 100644
index 0000000000000000000000000000000000000000..413b7566292d7841777547aeb665c7eb3b8ca293
--- /dev/null
+++ b/src/tests/ofc23/descriptors/emulated/old/descriptor_parent.json
@@ -0,0 +1,311 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", "device_drivers": [7],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8002"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "scheme": "http", "username": "admin", "password": "admin"
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "MW1-2"}}, "device_type": "microwave-radio-system", "device_drivers": [5],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8443"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin", "timeout": 120, "scheme": "https",
+                    "node_ids": ["172.18.0.1", "172.18.0.2"]
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "MW3-4"}}, "device_type": "microwave-radio-system", "device_drivers": [5],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8443"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin", "timeout": 120, "scheme": "https",
+                    "node_ids": ["172.18.0.3", "172.18.0.4"]
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "OLS"}}, "device_type": "open-line-system", "device_drivers": [2],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "cttc-ols.cttc-ols.svc.cluster.local"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "4900"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"timeout": 120}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "IPM"}}, "device_type": "xr-constellation", "device_drivers": [6],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8444"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "xr-user-1", "password": "xr-user-1", "hub_module_name": "OFC HUB 1",
+                    "consistency-mode": "lifecycle", "import_topology": "devices"
+                }}}
+            ]}
+        },
+
+
+        {
+            "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "device_type": "emu-optical-splitter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "optical/internal", "uuid": "common"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf1"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf2"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf3"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "DC1/eth1==PE1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE1/1/1==DC1/eth1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "DC1/eth2==PE2/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth2"}},
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE2/1/1==DC1/eth2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE1/1/2==MW1-2/172.18.0.1:1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "MW1-2"}}, "endpoint_uuid": {"uuid": "172.18.0.1:1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "MW1-2/172.18.0.1:1==PE1/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW1-2"}}, "endpoint_uuid": {"uuid": "172.18.0.1:1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE1"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "MW1-2/172.18.0.2:1==OFC HUB 1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW1-2"}}, "endpoint_uuid": {"uuid": "172.18.0.2:1"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC HUB 1/1/1==MW1-2/172.18.0.2:1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "MW1-2"}}, "endpoint_uuid": {"uuid": "172.18.0.2:1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE2/1/2==MW3-4/172.18.0.3:1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "MW3-4"}}, "endpoint_uuid": {"uuid": "172.18.0.3:1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "MW3-4/172.18.0.3:1==PE2/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW3-4"}}, "endpoint_uuid": {"uuid": "172.18.0.3:1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE2"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "MW3-4/172.18.0.4:1==OFC HUB 1/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW3-4"}}, "endpoint_uuid": {"uuid": "172.18.0.4:1"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC HUB 1/1/2==MW3-4/172.18.0.4:1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "MW3-4"}}, "endpoint_uuid": {"uuid": "172.18.0.4:1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC HUB 1/XR-T1==Optical-Splitter/common"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "common"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/common==OFC HUB 1/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "common"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/leaf1==OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/79516f5e-55a0-5671-977a-1f5cc934e700==Optical-Splitter/leaf1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "79516f5e-55a0-5671-977a-1f5cc934e700"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/leaf2==OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf2"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/30d9323e-b916-51ce-a9a8-cf88f62eb77f==Optical-Splitter/leaf2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "30d9323e-b916-51ce-a9a8-cf88f62eb77f"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 1/XR-T1==OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/68ac012e-54d4-5846-b5dc-6ec356404f90==OFC LEAF 1/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "68ac012e-54d4-5846-b5dc-6ec356404f90"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 2/XR-T1==OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/367b19b1-3172-54d8-bdd4-12d3ac5604f6==OFC LEAF 2/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "367b19b1-3172-54d8-bdd4-12d3ac5604f6"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 1/1/1==PE3/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE3/1/2==OFC LEAF 1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 2/1/1==PE4/1/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "1/2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "PE4/1/2==OFC LEAF 2/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "1/2"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE3/1/1==DC2/eth1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "DC2/eth1==PE3/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "PE3"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "PE4/1/1==DC2/eth2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "DC2/eth2==PE4/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth2"}},
+                {"device_id": {"device_uuid": {"uuid": "PE4"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        }
+    ]
+}
diff --git a/src/tests/ofc23/descriptors/real/dc-2-dc-service.json b/src/tests/ofc23/descriptors/real/dc-2-dc-service.json
new file mode 100644
index 0000000000000000000000000000000000000000..3a83afa6de81f137204aecc5f0eca476aad71e61
--- /dev/null
+++ b/src/tests/ofc23/descriptors/real/dc-2-dc-service.json
@@ -0,0 +1,37 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "dc-2-dc-svc"}
+            },
+            "service_type": 2,
+            "service_status": {"service_status": 1},
+            "service_endpoint_ids": [
+                {"device_id":{"device_uuid":{"uuid":"DC1"}},"endpoint_uuid":{"uuid":"int"}},
+                {"device_id":{"device_uuid":{"uuid":"DC2"}},"endpoint_uuid":{"uuid":"int"}}
+            ],
+            "service_constraints": [
+                {"sla_capacity": {"capacity_gbps": 10.0}},
+                {"sla_latency": {"e2e_latency_ms": 15.2}}
+            ],
+            "service_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {
+                    "address_families": ["IPV4"], "bgp_as": 65000, "bgp_route_target": "65000:123",
+                    "mtu": 1512, "vlan_id": 111
+                }}},
+                {"action": 1, "custom": {"resource_key": "/device[R149]/endpoint[eth-1/0/22]/settings", "resource_value": {
+                    "route_distinguisher": "65000:123", "router_id": "5.5.5.5",
+                    "address_ip": "172.16.4.1", "address_prefix": 24, "sub_interface_index": 0, "vlan_id": 111
+                }}},
+                {"action": 1, "custom": {"resource_key": "/device[R155]/endpoint[eth-1/0/22]/settings", "resource_value": {
+                    "route_distinguisher": "65000:123", "router_id": "5.5.5.1",
+                    "address_ip": "172.16.2.1", "address_prefix": 24, "sub_interface_index": 0, "vlan_id": 111
+                }}},
+                {"action": 1, "custom": {"resource_key": "/device[R199]/endpoint[eth-1/0/21]/settings", "resource_value": {
+                    "route_distinguisher": "65000:123", "router_id": "5.5.5.6",
+                    "address_ip": "172.16.1.1", "address_prefix": 24, "sub_interface_index": 0, "vlan_id": 111
+                }}}
+            ]}
+        }
+    ]
+}
diff --git a/src/tests/ofc23/descriptors/real/descriptor_child.json b/src/tests/ofc23/descriptors/real/descriptor_child.json
new file mode 100644
index 0000000000000000000000000000000000000000..8d695cfd2d419f263b554d0f2bf648b92cdde672
--- /dev/null
+++ b/src/tests/ofc23/descriptors/real/descriptor_child.json
@@ -0,0 +1,93 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "R199"}}, "device_type": "packet-router", "device_drivers": [1],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.95.86.199"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "830"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin",
+                    "force_running": false, "hostkey_verify": false, "look_for_keys": false,
+                    "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "huaweiyang"},
+                    "manager_params": {"timeout" : 86400}
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R149"}}, "device_type": "packet-router", "device_drivers": [1],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.95.86.149"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "830"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin",
+                    "force_running": false, "hostkey_verify": false, "look_for_keys": false,
+                    "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "huaweiyang"},
+                    "manager_params": {"timeout" : 86400}
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "R155"}}, "device_type": "packet-router", "device_drivers": [1],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.95.86.155"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "830"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin",
+                    "force_running": false, "hostkey_verify": false, "look_for_keys": false,
+                    "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "huaweiyang"},
+                    "manager_params": {"timeout" : 86400}
+                }}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "R199/eth-1/0/19==R155/eth-1/0/19"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/19"}},
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/19"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R155/eth-1/0/19==R199/eth-1/0/19"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/19"}},
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/19"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R199/eth-1/0/20==R149/eth-1/0/20"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}},
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R149/eth-1/0/20==R199/eth-1/0/20"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}},
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R149/eth-1/0/25==R155/eth-1/0/25"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}},
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R155/eth-1/0/25==R149/eth-1/0/25"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}},
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}}
+            ]
+        }
+    ]
+}
diff --git a/src/tests/ofc23/descriptors/real/descriptor_parent.json b/src/tests/ofc23/descriptors/real/descriptor_parent.json
new file mode 100644
index 0000000000000000000000000000000000000000..3317d46edaf6d270125d8b094c3b6384a3dd52fd
--- /dev/null
+++ b/src/tests/ofc23/descriptors/real/descriptor_parent.json
@@ -0,0 +1,258 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", "device_drivers": [7],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8002"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "scheme": "http", "username": "admin", "password": "admin"
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "MW"}}, "device_type": "microwave-radio-system", "device_drivers": [4, 5],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "192.168.27.136"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8443"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "nms5ux", "password": "nms5ux", "timeout": 120, "scheme": "https",
+                    "node_ids": ["192.168.27.139", "192.168.27.140"]
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "OLS"}}, "device_type": "open-line-system", "device_drivers": [2],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "cttc-ols.cttc-ols.svc.cluster.local"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "4900"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"timeout": 120}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "IPM"}}, "device_type": "xr-constellation", "device_drivers": [6],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.95.86.126"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "443"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "xr-user-1", "password": "xr-user-1", "hub_module_name": "OFC HUB 1",
+                    "consistency-mode": "lifecycle", "import_topology": "devices"
+                }}}
+            ]}
+        },
+
+
+        {
+            "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "device_type": "emu-optical-splitter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "optical/internal", "uuid": "common"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf1"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf2"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf3"},
+                    {"sample_types": [], "type": "optical/internal", "uuid": "leaf4"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth1"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "eth2"},
+                    {"sample_types": [], "type": "copper/internal", "uuid": "int"}
+                ]}}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "DC1/eth1==R149/eth-1/0/22"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R149/eth-1/0/22==DC1/eth1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}},
+                {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R149/eth-1/0/9==MW/192.168.27.140:5"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/9"}},
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.140:5"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "MW/192.168.27.140:5==R149/eth-1/0/9"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.140:5"}},
+                {"device_id": {"device_uuid": {"uuid": "R149"}}, "endpoint_uuid": {"uuid": "eth-1/0/9"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "MW/192.168.27.139:5==OFC HUB 1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.139:5"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC HUB 1/1/1==MW/192.168.27.139:5"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "192.168.27.139:5"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC HUB 1/XR-T1==Optical-Splitter/common"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "common"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/common==OFC HUB 1/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "common"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC HUB 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/leaf1==OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/79516f5e-55a0-5671-977a-1f5cc934e700==Optical-Splitter/leaf1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "79516f5e-55a0-5671-977a-1f5cc934e700"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "Optical-Splitter/leaf2==OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf2"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/30d9323e-b916-51ce-a9a8-cf88f62eb77f==Optical-Splitter/leaf2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "30d9323e-b916-51ce-a9a8-cf88f62eb77f"}},
+                {"device_id": {"device_uuid": {"uuid": "Optical-Splitter"}}, "endpoint_uuid": {"uuid": "leaf2"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 1/XR-T1==OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/68ac012e-54d4-5846-b5dc-6ec356404f90==OFC LEAF 1/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "68ac012e-54d4-5846-b5dc-6ec356404f90"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 2/XR-T1==OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "XR-T1"}},
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "OLS/367b19b1-3172-54d8-bdd4-12d3ac5604f6==OFC LEAF 2/XR-T1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "367b19b1-3172-54d8-bdd4-12d3ac5604f6"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "XR-T1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 1/1/1==R155/eth-1/0/25"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R155/eth-1/0/25==OFC LEAF 1/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/25"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 1"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "OFC LEAF 2/1/1==R199/eth-1/0/20"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "1/1"}},
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "R199/eth-1/0/20==OFC LEAF 2/1/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/20"}},
+                {"device_id": {"device_uuid": {"uuid": "OFC LEAF 2"}}, "endpoint_uuid": {"uuid": "1/1"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R155/eth-1/0/22==DC2/eth1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}},
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "DC2/eth1==R155/eth-1/0/22"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "R155"}}, "endpoint_uuid": {"uuid": "eth-1/0/22"}}
+            ]
+        },
+
+
+        {
+            "link_id": {"link_uuid": {"uuid": "R199/eth-1/0/21==DC2/eth2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/21"}},
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "DC2/eth2==R199/eth-1/0/21"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth2"}},
+                {"device_id": {"device_uuid": {"uuid": "R199"}}, "endpoint_uuid": {"uuid": "eth-1/0/21"}}
+            ]
+        }
+    ]
+}
diff --git a/src/tests/ofc23/dump_logs.sh b/src/tests/ofc23/dump_logs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..cc3162b337c1b35e9ff158b400a8d5c47931bdca
--- /dev/null
+++ b/src/tests/ofc23/dump_logs.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
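+# Discard any logs collected by previous runs before gathering fresh ones.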
+rm -rf tmp/exec
+
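+# For each TFS instance, dump the logs of the relevant container of every core deployment.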
+echo "Collecting logs for Parent..."
+mkdir -p tmp/exec/parent
+kubectl --namespace tfs-parent logs deployments/contextservice server > tmp/exec/parent/context.log
+kubectl --namespace tfs-parent logs deployments/deviceservice server > tmp/exec/parent/device.log
+kubectl --namespace tfs-parent logs deployments/serviceservice server > tmp/exec/parent/service.log
+kubectl --namespace tfs-parent logs deployments/pathcompservice frontend > tmp/exec/parent/pathcomp-frontend.log
+kubectl --namespace tfs-parent logs deployments/pathcompservice backend > tmp/exec/parent/pathcomp-backend.log
+kubectl --namespace tfs-parent logs deployments/sliceservice server > tmp/exec/parent/slice.log
+printf "\n"
+
+echo "Collecting logs for Child..."
+mkdir -p tmp/exec/child
+kubectl --namespace tfs-child logs deployments/contextservice server > tmp/exec/child/context.log
+kubectl --namespace tfs-child logs deployments/deviceservice server > tmp/exec/child/device.log
+kubectl --namespace tfs-child logs deployments/serviceservice server > tmp/exec/child/service.log
+kubectl --namespace tfs-child logs deployments/pathcompservice frontend > tmp/exec/child/pathcomp-frontend.log
+kubectl --namespace tfs-child logs deployments/pathcompservice backend > tmp/exec/child/pathcomp-backend.log
+kubectl --namespace tfs-child logs deployments/sliceservice server > tmp/exec/child/slice.log
+printf "\n"
+
+echo "Done!"
diff --git a/src/tests/ofc23/fast_redeploy.sh b/src/tests/ofc23/fast_redeploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..58d1193ded582d4fdff3222d5bcdc0fe510a7034
--- /dev/null
+++ b/src/tests/ofc23/fast_redeploy.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
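+# Quickly redeploy the parent and child TFS instances without re-running the
+# full deploy scripts: wipe both namespaces, re-apply the per-instance ingress
+# controllers and the pre-built manifests under ./tmp/manifests, then wait for
+# all deployments to become available.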
+kubectl delete namespace tfs-parent tfs-child --ignore-not-found
+kubectl wait --for=delete namespace/tfs-parent namespace/tfs-child --timeout=300s 2>/dev/null || true
+
+echo "Deploying tfs-parent ..."
+kubectl delete -f ofc23/nginx-ingress-controller-parent.yaml                 > ./tmp/logs/deploy-tfs-parent.log
+kubectl create namespace tfs-parent                                          > ./tmp/logs/deploy-tfs-parent.log
+kubectl apply -f ofc23/nginx-ingress-controller-parent.yaml                  > ./tmp/logs/deploy-tfs-parent.log
+kubectl --namespace tfs-parent apply -f ./tmp/manifests/contextservice.yaml  > ./tmp/logs/deploy-tfs-parent.log
+kubectl --namespace tfs-parent apply -f ./tmp/manifests/deviceservice.yaml   > ./tmp/logs/deploy-tfs-parent.log
+kubectl --namespace tfs-parent apply -f ./tmp/manifests/pathcompservice.yaml > ./tmp/logs/deploy-tfs-parent.log
+kubectl --namespace tfs-parent apply -f ./tmp/manifests/serviceservice.yaml  > ./tmp/logs/deploy-tfs-parent.log
+kubectl --namespace tfs-parent apply -f ./tmp/manifests/sliceservice.yaml    > ./tmp/logs/deploy-tfs-parent.log
+kubectl --namespace tfs-parent apply -f ./tmp/manifests/webuiservice.yaml    > ./tmp/logs/deploy-tfs-parent.log
+kubectl --namespace tfs-parent apply -f ofc23/tfs-ingress-parent.yaml        > ./tmp/logs/deploy-tfs-parent.log
+printf "\n"
+
+echo "Deploying tfs-child ..."
+kubectl delete -f ofc23/nginx-ingress-controller-child.yaml                  > ./tmp/logs/deploy-tfs-child.log
+kubectl create namespace tfs-child                                           > ./tmp/logs/deploy-tfs-child.log
+kubectl apply -f ofc23/nginx-ingress-controller-child.yaml                   > ./tmp/logs/deploy-tfs-child.log
+kubectl --namespace tfs-child apply -f ./tmp/manifests/contextservice.yaml   > ./tmp/logs/deploy-tfs-child.log
+kubectl --namespace tfs-child apply -f ./tmp/manifests/deviceservice.yaml    > ./tmp/logs/deploy-tfs-child.log
+kubectl --namespace tfs-child apply -f ./tmp/manifests/pathcompservice.yaml  > ./tmp/logs/deploy-tfs-child.log
+kubectl --namespace tfs-child apply -f ./tmp/manifests/serviceservice.yaml   > ./tmp/logs/deploy-tfs-child.log
+kubectl --namespace tfs-child apply -f ./tmp/manifests/sliceservice.yaml     > ./tmp/logs/deploy-tfs-child.log
+kubectl --namespace tfs-child apply -f ./tmp/manifests/webuiservice.yaml     > ./tmp/logs/deploy-tfs-child.log
+kubectl --namespace tfs-child apply -f ofc23/tfs-ingress-child.yaml          > ./tmp/logs/deploy-tfs-child.log
+printf "\n"
+
+echo "Waiting tfs-parent ..."
+kubectl wait --namespace tfs-parent --for='condition=available' --timeout=300s deployment/contextservice
+kubectl wait --namespace tfs-parent --for='condition=available' --timeout=300s deployment/deviceservice
+kubectl wait --namespace tfs-parent --for='condition=available' --timeout=300s deployment/pathcompservice
+kubectl wait --namespace tfs-parent --for='condition=available' --timeout=300s deployment/serviceservice
+kubectl wait --namespace tfs-parent --for='condition=available' --timeout=300s deployment/sliceservice
+kubectl wait --namespace tfs-parent --for='condition=available' --timeout=300s deployment/webuiservice
+printf "\n"
+
+echo "Waiting tfs-child ..."
+kubectl wait --namespace tfs-child --for='condition=available' --timeout=300s deployment/contextservice
+kubectl wait --namespace tfs-child --for='condition=available' --timeout=300s deployment/deviceservice
+kubectl wait --namespace tfs-child --for='condition=available' --timeout=300s deployment/pathcompservice
+kubectl wait --namespace tfs-child --for='condition=available' --timeout=300s deployment/serviceservice
+kubectl wait --namespace tfs-child --for='condition=available' --timeout=300s deployment/sliceservice
+kubectl wait --namespace tfs-child --for='condition=available' --timeout=300s deployment/webuiservice
+printf "\n"
+
+echo "Done!"
diff --git a/src/tests/ofc23/nginx-ingress-controller-child.yaml b/src/tests/ofc23/nginx-ingress-controller-child.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..00a64d75e9a9a2cb93cdbd6d89790e26b0730eb6
--- /dev/null
+++ b/src/tests/ofc23/nginx-ingress-controller-child.yaml
@@ -0,0 +1,134 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-microk8s-conf-child
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-udp-microk8s-conf-child
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-tcp-microk8s-conf-child
+  namespace: ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: tfs-ingress-class-child
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "false"
+spec:
+  controller: tfs.etsi.org/controller-class-child
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nginx-ingress-microk8s-controller-child
+  namespace: ingress
+  labels:
+    microk8s-application: nginx-ingress-microk8s-child
+spec:
+  selector:
+    matchLabels:
+      name: nginx-ingress-microk8s-child
+  updateStrategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: nginx-ingress-microk8s-child
+    spec:
+      terminationGracePeriodSeconds: 60
+      restartPolicy: Always
+      serviceAccountName: nginx-ingress-microk8s-serviceaccount
+      containers:
+      - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0
+        imagePullPolicy: IfNotPresent
+        name: nginx-ingress-microk8s
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        lifecycle:
+          preStop:
+            exec:
+              command:
+                - /wait-shutdown
+        securityContext:
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - ALL
+          runAsUser: 101 # www-data
+        env:
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.namespace
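+        # Host ports 8002/4432/12542 are offset from the parent controller
+        # (8001/4431/12541) so both ingress DaemonSets can share the same node.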
+        ports:
+        - name: http
+          containerPort: 80
+          hostPort: 8002
+          protocol: TCP
+        - name: https
+          containerPort: 443
+          hostPort: 4432
+          protocol: TCP
+        - name: health
+          containerPort: 10254
+          hostPort: 12542
+          protocol: TCP
+        args:
+        - /nginx-ingress-controller
+        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-child
+        - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-child
+        - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-child
+        - --election-id=ingress-controller-leader-child
+        - --controller-class=tfs.etsi.org/controller-class-child
+        - --ingress-class=tfs-ingress-class-child
+        - ' '
+        - --publish-status-address=127.0.0.1
diff --git a/src/tests/ofc23/nginx-ingress-controller-parent.yaml b/src/tests/ofc23/nginx-ingress-controller-parent.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c504c2e6766c1ad5b81a2479b4d05a09ba46d906
--- /dev/null
+++ b/src/tests/ofc23/nginx-ingress-controller-parent.yaml
@@ -0,0 +1,134 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-microk8s-conf-parent
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-udp-microk8s-conf-parent
+  namespace: ingress
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-ingress-tcp-microk8s-conf-parent
+  namespace: ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: tfs-ingress-class-parent
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "false"
+spec:
+  controller: tfs.etsi.org/controller-class-parent
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nginx-ingress-microk8s-controller-parent
+  namespace: ingress
+  labels:
+    microk8s-application: nginx-ingress-microk8s-parent
+spec:
+  selector:
+    matchLabels:
+      name: nginx-ingress-microk8s-parent
+  updateStrategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: nginx-ingress-microk8s-parent
+    spec:
+      terminationGracePeriodSeconds: 60
+      restartPolicy: Always
+      serviceAccountName: nginx-ingress-microk8s-serviceaccount
+      containers:
+      - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0
+        imagePullPolicy: IfNotPresent
+        name: nginx-ingress-microk8s
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        lifecycle:
+          preStop:
+            exec:
+              command:
+                - /wait-shutdown
+        securityContext:
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - ALL
+          runAsUser: 101 # www-data
+        env:
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.namespace
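+        # Host ports 8001/4431/12541 are reserved for the parent controller;
+        # the child controller uses 8002/4432/12542 on the same node.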
+        ports:
+        - name: http
+          containerPort: 80
+          hostPort: 8001
+          protocol: TCP
+        - name: https
+          containerPort: 443
+          hostPort: 4431
+          protocol: TCP
+        - name: health
+          containerPort: 10254
+          hostPort: 12541
+          protocol: TCP
+        args:
+        - /nginx-ingress-controller
+        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-parent
+        - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-parent
+        - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-parent
+        - --election-id=ingress-controller-leader-parent
+        - --controller-class=tfs.etsi.org/controller-class-parent
+        - --ingress-class=tfs-ingress-class-parent
+        - ' '
+        - --publish-status-address=127.0.0.1
diff --git a/src/tests/ofc23/show_deploy.sh b/src/tests/ofc23/show_deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d4e112b0f494e4a6dba964eda4e31652b8548043
--- /dev/null
+++ b/src/tests/ofc23/show_deploy.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-parent get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-parent get ingress
+printf "\n"
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-child get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-child get ingress
+printf "\n"
diff --git a/src/tests/ofc23/show_deploy_sligrp.sh b/src/tests/ofc23/show_deploy_sligrp.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b5e3600ba99ec2e4de4a953f99c44ed2d88bba57
--- /dev/null
+++ b/src/tests/ofc23/show_deploy_sligrp.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+echo "Deployment Resources:"
+kubectl --namespace tfs get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs get ingress
+printf "\n"
diff --git a/src/tests/ofc23/tfs-ingress-child.yaml b/src/tests/ofc23/tfs-ingress-child.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a93b9321c9f25c78e8423413c4225f78c7aee719
--- /dev/null
+++ b/src/tests/ofc23/tfs-ingress-child.yaml
@@ -0,0 +1,53 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-child
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
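+    # The /$2 rewrite forwards only the second capture group of each path
+    # regex below, stripping the /webui, /grafana and /context prefixes.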
+spec:
+  ingressClassName: tfs-ingress-class-child
+  rules:
+  - http:
+      paths:
+        - path: /webui(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 8004
+        - path: /grafana(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 3000
+        - path: /context(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: contextservice
+              port:
+                number: 8080
+        - path: /()(restconf/.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: computeservice
+              port:
+                number: 8080
diff --git a/src/tests/ofc23/tfs-ingress-parent.yaml b/src/tests/ofc23/tfs-ingress-parent.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..baf506dd90f320a1913beb8becd39164daa21370
--- /dev/null
+++ b/src/tests/ofc23/tfs-ingress-parent.yaml
@@ -0,0 +1,53 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-parent
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
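+    # The /$2 rewrite forwards only the second capture group of each path
+    # regex below, stripping the /webui, /grafana and /context prefixes.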
+spec:
+  ingressClassName: tfs-ingress-class-parent
+  rules:
+  - http:
+      paths:
+        - path: /webui(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 8004
+        - path: /grafana(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 3000
+        - path: /context(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: contextservice
+              port:
+                number: 8080
+        - path: /()(restconf/.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: computeservice
+              port:
+                number: 8080
diff --git a/src/tests/tools/mock_ipm_sdn_ctrl/MockIPMSdnCtrl.py b/src/tests/tools/mock_ipm_sdn_ctrl/MockIPMSdnCtrl.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecac81be7e3bdff1dcbac458142ea15bf367a1a1
--- /dev/null
+++ b/src/tests/tools/mock_ipm_sdn_ctrl/MockIPMSdnCtrl.py
@@ -0,0 +1,192 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Mock IPM SDN controller: implements the minimal subset of the API needed by
+# the tests (OpenID Connect token grant, xr-networks, network-connections).
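+#
+# A minimal sketch of exercising the mock with `requests` (assuming it runs
+# locally; verify=False because the TLS certificate is generated ad-hoc):
+#
+#   import requests
+#   token = requests.post(
+#       'https://127.0.0.1:8444/realms/xr-cm/protocol/openid-connect/token',
+#       data={'client_id': 'xr-web-client', 'client_secret': 'xr-web-client',
+#             'grant_type': 'password', 'username': 'xr-user-1', 'password': 'xr-user-1'},
+#       verify=False).json()['access_token']
+#   networks = requests.get(
+#       'https://127.0.0.1:8444/api/v1/xr-networks',
+#       params={'q': '{"hubModule.state.module.moduleName": "OFC HUB 1"}'},
+#       verify=False).json()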
+
+import functools, json, logging, sys, time, uuid
+from typing import Any, Dict, Optional, Tuple
+from flask import Flask, jsonify, make_response, request
+from flask_restful import Api, Resource
+
+BIND_ADDRESS = '0.0.0.0'
+BIND_PORT    = 8444
+IPM_USERNAME = 'xr-user-1'
+IPM_PASSWORD = 'xr-user-1'
+STR_ENDPOINT = 'https://{:s}:{:s}'.format(str(BIND_ADDRESS), str(BIND_PORT))
+LOG_LEVEL    = logging.DEBUG
+
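+# Static constellation advertised by the mock: one hub module with two client
+# interfaces (XR-T1, XR-T4) and two leaf modules with one client interface each.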
+CONSTELLATION = {
+    'id': 'ofc-constellation',
+    'hubModule': {'state': {
+        'module': {'moduleName': 'OFC HUB 1', 'trafficMode': 'L1Mode', 'capacity': 100},
+        'endpoints': [{'moduleIf': {'clientIfAid': 'XR-T1'}}, {'moduleIf': {'clientIfAid': 'XR-T4'}}]
+    }},
+    'leafModules': [
+        {'state': {
+            'module': {'moduleName': 'OFC LEAF 1', 'trafficMode': 'L1Mode', 'capacity': 100},
+            'endpoints': [{'moduleIf': {'clientIfAid': 'XR-T1'}}]
+        }},
+        {'state': {
+            'module': {'moduleName': 'OFC LEAF 2', 'trafficMode': 'L1Mode', 'capacity': 100},
+            'endpoints': [{'moduleIf': {'clientIfAid': 'XR-T1'}}]
+        }}
+    ]
+}
+
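+# In-memory connection store: connection UUID -> connection object, plus an
+# index from the user-supplied state name to the owning connection UUID.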
+CONNECTIONS : Dict[str, Any] = dict()
+STATE_NAME_TO_CONNECTION : Dict[str, str] = dict()
+
+def select_module_state(module_name : str) -> Optional[Dict]:
+    hub_module_state = CONSTELLATION.get('hubModule', {}).get('state', {})
+    if module_name == hub_module_state.get('module', {}).get('moduleName'): return hub_module_state
+    for leaf_module in CONSTELLATION.get('leafModules', []):
+        leaf_module_state = leaf_module.get('state', {})
+        if module_name == leaf_module_state.get('module', {}).get('moduleName'): return leaf_module_state
+    return None
+
+def select_endpoint(module_state : Dict, module_if : str) -> Optional[Dict]:
+    for endpoint in module_state.get('endpoints', []):
+        if module_if == endpoint.get('moduleIf', {}).get('clientIfAid'): return endpoint
+    return None
+
+def select_module_endpoint(selector : Dict) -> Optional[Tuple[Dict, Dict]]:
+    selected_module_name = selector['moduleIfSelectorByModuleName']['moduleName']
+    selected_module_if = selector['moduleIfSelectorByModuleName']['moduleClientIfAid']
+    module_state = select_module_state(selected_module_name)
+    if module_state is None: return None
+    return module_state, select_endpoint(module_state, selected_module_if)
+
+def compose_endpoint(endpoint_selector : Dict) -> Dict:
+    module, endpoint = select_module_endpoint(endpoint_selector['selector'])
+    return {
+        'href': '/' + str(uuid.uuid4()),
+        'state': {
+            'moduleIf': {
+                'moduleName': module['module']['moduleName'],
+                'clientIfAid': endpoint['moduleIf']['clientIfAid'],
+            },
+            'capacity': module['module']['capacity'],
+        }
+    }
+
+logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+LOGGER = logging.getLogger(__name__)
+
+logging.getLogger('werkzeug').setLevel(logging.WARNING)
+
+def log_request(logger : logging.Logger, response):
+    timestamp = time.strftime('[%Y-%b-%d %H:%M]')
+    logger.info('%s %s %s %s %s', timestamp, request.remote_addr, request.method, request.full_path, response.status)
+    return response
+
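+# Keycloak-style OpenID Connect password-grant endpoint: validates the fixed
+# client and user credentials and returns an opaque access token.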
+class OpenIdConnect(Resource):
+    ACCESS_TOKENS = {}
+
+    def post(self):
+        if request.content_type != 'application/x-www-form-urlencoded': return make_response('bad content type', 400)
+        if request.content_length == 0: return make_response('bad content length', 400)
+        request_form = request.form
+        if request_form.get('client_id') != 'xr-web-client': return make_response('bad client_id', 403)
+        if request_form.get('client_secret') != 'xr-web-client': return make_response('bad client_secret', 403)
+        if request_form.get('grant_type') != 'password': return make_response('bad grant_type', 403)
+        if request_form.get('username') != IPM_USERNAME: return make_response('bad username', 403)
+        if request_form.get('password') != IPM_PASSWORD: return make_response('bad password', 403)
+        # Tokens are never checked afterwards; a per-user random UUID string suffices.
+        access_token = OpenIdConnect.ACCESS_TOKENS.setdefault(IPM_USERNAME, str(uuid.uuid4()))
+        reply = {'access_token': access_token, 'expires_in': 86400}
+        return make_response(jsonify(reply), 200)
+
+class XrNetworks(Resource):
+    def get(self):
+        query = json.loads(request.args.get('q'))
+        hub_module_name = query.get('hubModule.state.module.moduleName')
+        if hub_module_name != 'OFC HUB 1': return make_response('unexpected hub module', 404)
+        return make_response(jsonify([CONSTELLATION]), 200)
+
+class XrNetworkConnections(Resource):
+    def get(self):
+        query = json.loads(request.args.get('q'))
+        state_name = query.get('state.name')
+        if state_name is None:
+            connections = [connection for connection in CONNECTIONS.values()]
+        else:
+            connection_uuid = STATE_NAME_TO_CONNECTION.get(state_name)
+            if connection_uuid is None: return make_response('state name not found', 404)
+            connection = CONNECTIONS.get(connection_uuid)
+            if connection is None: return make_response('connection for state name not found', 404)
+            connections = [connection]
+        return make_response(jsonify(connections), 200)
+
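+    # Accepts a JSON list of connection requests; returns the created objects,
+    # each with a freshly generated href under /network-connections/.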
+    def post(self):
+        if request.content_type != 'application/json': return make_response('bad content type', 400)
+        if request.content_length == 0: return make_response('bad content length', 400)
+        request_json = request.json
+        if not isinstance(request_json, list): return make_response('content is not list', 400)
+        reply = []
+        for connection in request_json:
+            connection_uuid = str(uuid.uuid4())
+            state_name = connection['name']
+
+            if state_name is not None: STATE_NAME_TO_CONNECTION[state_name] = connection_uuid
+            CONNECTIONS[connection_uuid] = {
+                'href': '/network-connections/{:s}'.format(str(connection_uuid)),
+                'config': {
+                    'implicitTransportCapacity': connection['implicitTransportCapacity']
+                    # 'mc': ??
+                },
+                'state': {
+                    'name': state_name,
+                    'serviceMode': connection['serviceMode']
+                    # 'outerVID' : ??
+                },
+                'endpoints': [
+                    compose_endpoint(endpoint)
+                    for endpoint in connection['endpoints']
+                ]
+            }
+            reply.append(CONNECTIONS[connection_uuid])
+        return make_response(jsonify(reply), 202)
+
+class XrNetworkConnection(Resource):
+    def get(self, connection_uuid : str):
+        connection = CONNECTIONS.get(connection_uuid)
+        if connection is None: return make_response('unexpected connection id', 404)
+        return make_response(jsonify(connection), 200)
+
+    def delete(self, connection_uuid : str):
+        connection = CONNECTIONS.pop(connection_uuid, None)
+        if connection is None: return make_response('unexpected connection id', 404)
+        state_name = connection['state']['name']
+        STATE_NAME_TO_CONNECTION.pop(state_name, None)
+        return make_response(jsonify({}), 202)
+
+def main():
+    LOGGER.info('Starting...')
+
+    app = Flask(__name__)
+    app.after_request(functools.partial(log_request, LOGGER))
+
+    api = Api(app)
+    api.add_resource(OpenIdConnect,        '/realms/xr-cm/protocol/openid-connect/token')
+    api.add_resource(XrNetworks,           '/api/v1/xr-networks')
+    api.add_resource(XrNetworkConnections, '/api/v1/network-connections')
+    api.add_resource(XrNetworkConnection,  '/api/v1/network-connections/<string:connection_uuid>')
+
+    LOGGER.info('Listening on {:s}...'.format(str(STR_ENDPOINT)))
+    app.run(debug=True, host=BIND_ADDRESS, port=BIND_PORT, ssl_context='adhoc')
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/tests/tools/mock_ipm_sdn_ctrl/run.sh b/src/tests/tools/mock_ipm_sdn_ctrl/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2aa78712c58d8cc255b60202d1576de683798d2e
--- /dev/null
+++ b/src/tests/tools/mock_ipm_sdn_ctrl/run.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+python MockIPMSdnCtrl.py
diff --git a/src/tests/tools/mock_sdn_ctrl/MockMWSdnCtrl.py b/src/tests/tools/mock_mw_sdn_ctrl/MockMWSdnCtrl.py
similarity index 54%
rename from src/tests/tools/mock_sdn_ctrl/MockMWSdnCtrl.py
rename to src/tests/tools/mock_mw_sdn_ctrl/MockMWSdnCtrl.py
index 63be214b671c2ee9b222d92e6f964a4203b8a587..c20dde1b9958fb92e4c6f026fbfe55f9ba348ba1 100644
--- a/src/tests/tools/mock_sdn_ctrl/MockMWSdnCtrl.py
+++ b/src/tests/tools/mock_mw_sdn_ctrl/MockMWSdnCtrl.py
@@ -25,8 +25,7 @@
 # Ref: https://blog.miguelgrinberg.com/post/designing-a-restful-api-using-flask-restful
 
 import functools, logging, sys, time
-from flask import Flask, abort, request
-from flask.json import jsonify
+from flask import Flask, abort, jsonify, make_response, request
 from flask_restful import Api, Resource
 
 BIND_ADDRESS = '0.0.0.0'
@@ -36,31 +35,51 @@ STR_ENDPOINT = 'https://{:s}:{:s}{:s}'.format(str(BIND_ADDRESS), str(BIND_PORT),
 LOG_LEVEL    = logging.DEBUG
 
 NETWORK_NODES = [
-    {'node-id': '172.18.0.1', 'ietf-network-topology:termination-point': [
-        {'tp-id': '172.18.0.1:1', 'ietf-te-topology:te': {'name': 'ethernet'}},
-        {'tp-id': '172.18.0.1:2', 'ietf-te-topology:te': {'name': 'antena'  }},
+    {'node-id': '192.168.27.139', 'ietf-network-topology:termination-point': [
+        {'tp-id': '1', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '2', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '3', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '4', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '5', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '6', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '10', 'ietf-te-topology:te': {'name': 'antena' }},
     ]},
-    {'node-id': '172.18.0.2', 'ietf-network-topology:termination-point': [
-        {'tp-id': '172.18.0.2:1', 'ietf-te-topology:te': {'name': 'ethernet'}},
-        {'tp-id': '172.18.0.2:2', 'ietf-te-topology:te': {'name': 'antena'  }},
+    {'node-id': '192.168.27.140', 'ietf-network-topology:termination-point': [
+        {'tp-id': '1', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '2', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '3', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '4', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '5', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '6', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '10', 'ietf-te-topology:te': {'name': 'antena' }},
     ]},
-    {'node-id': '172.18.0.3', 'ietf-network-topology:termination-point': [
-        {'tp-id': '172.18.0.3:1', 'ietf-te-topology:te': {'name': 'ethernet'}},
-        {'tp-id': '172.18.0.3:2', 'ietf-te-topology:te': {'name': 'antena'  }},
+    {'node-id': '192.168.27.141', 'ietf-network-topology:termination-point': [
+        {'tp-id': '1', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '2', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '3', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '4', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '5', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '6', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '10', 'ietf-te-topology:te': {'name': 'antena' }},
     ]},
-    {'node-id': '172.18.0.4', 'ietf-network-topology:termination-point': [
-        {'tp-id': '172.18.0.4:1', 'ietf-te-topology:te': {'name': 'ethernet'}},
-        {'tp-id': '172.18.0.4:2', 'ietf-te-topology:te': {'name': 'antena'  }},
+    {'node-id': '192.168.27.142', 'ietf-network-topology:termination-point': [
+        {'tp-id': '1', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '2', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '3', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '4', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '5', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '6', 'ietf-te-topology:te': {'name': 'ethernet'}},
+        {'tp-id': '10', 'ietf-te-topology:te': {'name': 'antena' }},
     ]}
 ]
 NETWORK_LINKS = [
-    {
-        'source'     : {'source-node': '172.18.0.1', 'source-tp': '172.18.0.1:2'},
-        'destination': {'dest-node'  : '172.18.0.2', 'dest-tp'  : '172.18.0.2:2'},
+    {   'link-id'    : '192.168.27.139:10--192.168.27.140:10',
+        'source'     : {'source-node': '192.168.27.139', 'source-tp': '10'},
+        'destination': {'dest-node'  : '192.168.27.140', 'dest-tp'  : '10'},
     },
-    {
-        'source'     : {'source-node': '172.18.0.3', 'source-tp': '172.18.0.3:2'},
-        'destination': {'dest-node'  : '172.18.0.4', 'dest-tp'  : '172.18.0.4:2'},
+    {   'link-id'    : '192.168.27.141:10--192.168.27.142:10',
+        'source'     : {'source-node': '192.168.27.141', 'source-tp': '10'},
+        'destination': {'dest-node'  : '192.168.27.142', 'dest-tp'  : '10'},
     }
 ]
 NETWORK_SERVICES = {}
@@ -77,21 +96,22 @@ def log_request(logger : logging.Logger, response):
     return response
 
 class Health(Resource):
-    def get(self): return jsonify({})
+    def get(self):
+        return make_response(jsonify({}), 200)
 
 class Network(Resource):
     def get(self, network_uuid : str):
         if network_uuid != 'SIAE-ETH-TOPOLOGY': abort(400)
         network = {'node': NETWORK_NODES, 'ietf-network-topology:link': NETWORK_LINKS}
-        return jsonify({'ietf-network:network': network})
+        return make_response(jsonify({'ietf-network:network': network}), 200)
 
 class Services(Resource):
     def get(self):
         services = [service for service in NETWORK_SERVICES.values()]
-        return jsonify({'ietf-eth-tran-service:etht-svc': {'etht-svc-instances': services}})
+        return make_response(jsonify({'ietf-eth-tran-service:etht-svc': {'etht-svc-instances': services}}), 200)
 
     def post(self):
-        json_request = request.json
+        json_request = request.get_json()
         if not json_request: abort(400)
         if not isinstance(json_request, dict): abort(400)
         if 'etht-svc-instances' not in json_request: abort(400)
@@ -101,12 +121,12 @@ class Services(Resource):
         svc_data = json_services[0]
         etht_svc_name = svc_data['etht-svc-name']
         NETWORK_SERVICES[etht_svc_name] = svc_data
-        return jsonify({}), 201
+        return make_response(jsonify({}), 201)
 
 class DelServices(Resource):
     def delete(self, service_uuid : str):
         NETWORK_SERVICES.pop(service_uuid, None)
-        return jsonify({}), 204
+        return make_response(jsonify({}), 204)
 
 def main():
     LOGGER.info('Starting...')
diff --git a/src/tests/tools/mock_sdn_ctrl/README.md b/src/tests/tools/mock_mw_sdn_ctrl/README.md
similarity index 94%
rename from src/tests/tools/mock_sdn_ctrl/README.md
rename to src/tests/tools/mock_mw_sdn_ctrl/README.md
index d8a6fe6b279553e54f13792cbf12f15b2b380dc2..8568c89ed22e2010bc565e28dc42821181dd6e0a 100644
--- a/src/tests/tools/mock_sdn_ctrl/README.md
+++ b/src/tests/tools/mock_mw_sdn_ctrl/README.md
@@ -12,8 +12,8 @@ Follow the steps below to perform the test:
 ## 1. Deploy TeraFlowSDN controller and the scenario
 Deploy the test scenario "microwave_deploy.sh":
 ```bash
-source src/tests/tools/microwave_deploy.sh
-./deploy.sh
+source src/tests/tools/mock_mw_sdn_ctrl/scenario/microwave_deploy.sh
+./deploy/all.sh
 ```
 
 ## 2. Install requirements and run the Mock MicroWave SDN controller
@@ -27,7 +27,7 @@ pip install Flask==2.1.3 Flask-RESTful==0.3.9
 
 Run the Mock MicroWave SDN Controller as follows:
 ```bash
-python src/tests/tools/mock_sdn_ctrl/MockMWSdnCtrl.py
+python src/tests/tools/mock_mw_sdn_ctrl/MockMWSdnCtrl.py
 ```
 
 ## 3. Deploy the test descriptors
diff --git a/src/tests/tools/mock_mw_sdn_ctrl/run.sh b/src/tests/tools/mock_mw_sdn_ctrl/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..415fc1751f132478889fba2e0ec1f5da742e23f1
--- /dev/null
+++ b/src/tests/tools/mock_mw_sdn_ctrl/run.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+python MockMWSdnCtrl.py
diff --git a/src/tests/tools/mock_sdn_ctrl/microwave_deploy.sh b/src/tests/tools/mock_mw_sdn_ctrl/scenario/microwave_deploy.sh
similarity index 100%
rename from src/tests/tools/mock_sdn_ctrl/microwave_deploy.sh
rename to src/tests/tools/mock_mw_sdn_ctrl/scenario/microwave_deploy.sh
diff --git a/src/tests/tools/mock_sdn_ctrl/network_descriptors.json b/src/tests/tools/mock_mw_sdn_ctrl/scenario/network_descriptors.json
similarity index 100%
rename from src/tests/tools/mock_sdn_ctrl/network_descriptors.json
rename to src/tests/tools/mock_mw_sdn_ctrl/scenario/network_descriptors.json
diff --git a/src/tests/tools/mock_sdn_ctrl/service_descriptor.json b/src/tests/tools/mock_mw_sdn_ctrl/scenario/service_descriptor.json
similarity index 100%
rename from src/tests/tools/mock_sdn_ctrl/service_descriptor.json
rename to src/tests/tools/mock_mw_sdn_ctrl/scenario/service_descriptor.json
diff --git a/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/Dockerfile b/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..ad214b97c091bf82a8bfe5c0ce4183d0bae2766e
--- /dev/null
+++ b/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/Dockerfile
@@ -0,0 +1,35 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Create component sub-folder, and copy content
+RUN mkdir -p /var/teraflow/mock_mw_sdn_ctrl
+WORKDIR /var/teraflow/mock_mw_sdn_ctrl
+COPY . .
+
+# Get specific Python packages
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Start the service
+ENTRYPOINT ["python", "MockMWSdnCtrl.py"]
diff --git a/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/build.sh b/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/build.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4df315cec178cef13eaa059a739bc22efc011d4d
--- /dev/null
+++ b/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/build.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+docker build -t mock-mw-sdn-ctrl:test -f Dockerfile .
+docker tag mock-mw-sdn-ctrl:test localhost:32000/tfs/mock-mw-sdn-ctrl:test
+docker push localhost:32000/tfs/mock-mw-sdn-ctrl:test
diff --git a/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/deploy.sh b/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ded232e5c50f8cd5ed448ec0193f58c43626f4ad
--- /dev/null
+++ b/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/deploy.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+# The manifest recreates the "mocks" namespace; wait for the old one to be
+# fully removed before re-applying, otherwise the apply races the deletion.
+kubectl delete namespace mocks --ignore-not-found
+kubectl wait --for=delete namespace/mocks --timeout=120s 2>/dev/null || true
+kubectl --namespace mocks apply -f mock-mw-sdn-ctrl.yaml
diff --git a/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/mock-mw-sdn-ctrl.yaml b/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/mock-mw-sdn-ctrl.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..05b89f949e940ae55ad592b9cc0e82a6eea2e343
--- /dev/null
+++ b/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/mock-mw-sdn-ctrl.yaml
@@ -0,0 +1,46 @@
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: mocks
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: mock-mw-sdn-ctrl
+spec:
+  selector:
+    matchLabels:
+      app: mock-mw-sdn-ctrl
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: mock-mw-sdn-ctrl
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+      - name: server
+        image: localhost:32000/tfs/mock-mw-sdn-ctrl:test
+        imagePullPolicy: IfNotPresent
+        ports:
+        - containerPort: 8443
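+        # Must match the HTTPS port MockMWSdnCtrl.py binds to.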
+        resources:
+          requests:
+            cpu: 250m
+            memory: 512Mi
+          limits:
+            cpu: 700m
+            memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: mock-mw-sdn-ctrl
+spec:
+  type: ClusterIP
+  selector:
+    app: mock-mw-sdn-ctrl
+  ports:
+  - name: https
+    port: 8443
+    targetPort: 8443
diff --git a/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/requirements.in b/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..f4bc191062c8385b2eeb003832b284313305c795
--- /dev/null
+++ b/src/tests/tools/mock_mw_sdn_ctrl/ssl_not_working/requirements.in
@@ -0,0 +1,21 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cryptography==39.0.1
+pyopenssl==23.0.0
+Flask==2.1.3
+Flask-HTTPAuth==4.5.0
+Flask-RESTful==0.3.9
+jsonschema==4.4.0
+requests==2.27.1
diff --git a/src/tests/tools/mock_mw_sdn_ctrl/test_mw.py b/src/tests/tools/mock_mw_sdn_ctrl/test_mw.py
new file mode 100644
index 0000000000000000000000000000000000000000..0329d30ad234398200c0fe29aac46f72f5a2e924
--- /dev/null
+++ b/src/tests/tools/mock_mw_sdn_ctrl/test_mw.py
@@ -0,0 +1,84 @@
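+# Standalone helper to exercise MockMWSdnCtrl.py over RESTCONF: creates and
+# then deletes an ietf-eth-tran-service connectivity service.
+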
+import json, logging, requests
+from requests.auth import HTTPBasicAuth
+from typing import Optional
+
+LOGGER = logging.getLogger(__name__)
+
+HTTP_OK_CODES = {
+    200,    # OK
+    201,    # Created
+    202,    # Accepted
+    204,    # No Content
+}
+
+def create_connectivity_service(
+    root_url, uuid, node_id_src, tp_id_src, node_id_dst, tp_id_dst, vlan_id,
+    auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None
+):
+
+    url = '{:s}/nmswebs/restconf/data/ietf-eth-tran-service:etht-svc'.format(root_url)
+    headers = {'content-type': 'application/json'}
+    data = {
+        'etht-svc-instances': [
+            {
+                'etht-svc-name': uuid,
+                'etht-svc-type': 'ietf-eth-tran-types:p2p-svc',
+                'etht-svc-end-points': [
+                    {
+                        'etht-svc-access-points': [
+                            {'access-node-id': node_id_src, 'access-ltp-id': tp_id_src, 'access-point-id': '1'}
+                        ],
+                        'outer-tag': {'vlan-value': vlan_id, 'tag-type': 'ietf-eth-tran-types:classify-c-vlan'},
+                        'etht-svc-end-point-name': '{:s}:{:s}'.format(str(node_id_src), str(tp_id_src)),
+                        'service-classification-type': 'ietf-eth-tran-types:vlan-classification'
+                    },
+                    {
+                        'etht-svc-access-points': [
+                            {'access-node-id': node_id_dst, 'access-ltp-id': tp_id_dst, 'access-point-id': '2'}
+                        ],
+                        'outer-tag': {'vlan-value': vlan_id, 'tag-type': 'ietf-eth-tran-types:classify-c-vlan'},
+                        'etht-svc-end-point-name': '{:s}:{:s}'.format(str(node_id_dst), str(tp_id_dst)),
+                        'service-classification-type': 'ietf-eth-tran-types:vlan-classification'
+                    }
+                ]
+            }
+        ]
+    }
+    results = []
+    try:
+        LOGGER.info('Connectivity service {:s}: {:s}'.format(str(uuid), str(data)))
+        response = requests.post(
+            url=url, data=json.dumps(data), timeout=timeout, headers=headers, verify=False, auth=auth)
+        LOGGER.info('Microwave Driver response: {:s}'.format(str(response)))
+    except Exception as e:  # pylint: disable=broad-except
+        LOGGER.exception('Exception creating ConnectivityService(uuid={:s}, data={:s})'.format(str(uuid), str(data)))
+        results.append(e)
+    else:
+        if response.status_code not in HTTP_OK_CODES:
+            msg = 'Could not create ConnectivityService(uuid={:s}, data={:s}). status_code={:s} reply={:s}'
+            LOGGER.error(msg.format(str(uuid), str(data), str(response.status_code), str(response)))
+        results.append(response.status_code in HTTP_OK_CODES)
+    return results
+
+def delete_connectivity_service(root_url, uuid, auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None):
+    url = '{:s}/nmswebs/restconf/data/ietf-eth-tran-service:etht-svc/etht-svc-instances={:s}'
+    url = url.format(root_url, uuid)
+    results = []
+    try:
+        response = requests.delete(url=url, timeout=timeout, verify=False, auth=auth)
+    except Exception as e:  # pylint: disable=broad-except
+        LOGGER.exception('Exception deleting ConnectivityService(uuid={:s})'.format(str(uuid)))
+        results.append(e)
+    else:
+        if response.status_code not in HTTP_OK_CODES:
+            msg = 'Could not delete ConnectivityService(uuid={:s}). status_code={:s} reply={:s}'
+            LOGGER.error(msg.format(str(uuid), str(response.status_code), str(response)))
+        results.append(response.status_code in HTTP_OK_CODES)
+    return results
+
+if __name__ == '__main__':
+    # Show the INFO-level messages logged above when running standalone.
+    logging.basicConfig(level=logging.INFO)
+
+    ROOT_URL = 'https://127.0.0.1:8443'
+    SERVICE_UUID = 'my-service'
+
+    # Node and termination-point identifiers match the topology exposed by MockMWSdnCtrl.py.
+    create_connectivity_service(ROOT_URL, SERVICE_UUID, '192.168.27.139', '1', '192.168.27.140', '2', 300)
+    delete_connectivity_service(ROOT_URL, SERVICE_UUID)
diff --git a/src/webui/Dockerfile b/src/webui/Dockerfile
index 7c718890fcf3f07b32f66eca2ecab41f2eb30fbb..2a1510954dbd2a9b0817f94145baaa22ac9d3a3f 100644
--- a/src/webui/Dockerfile
+++ b/src/webui/Dockerfile
@@ -79,6 +79,7 @@ COPY --chown=webui:webui src/device/__init__.py device/__init__.py
 COPY --chown=webui:webui src/device/client/. device/client/
 COPY --chown=webui:webui src/load_generator/__init__.py load_generator/__init__.py
 COPY --chown=webui:webui src/load_generator/client/. load_generator/client/
+COPY --chown=webui:webui src/load_generator/tools/. load_generator/tools/
 COPY --chown=webui:webui src/service/__init__.py service/__init__.py
 COPY --chown=webui:webui src/service/client/. service/client/
 COPY --chown=webui:webui src/slice/__init__.py slice/__init__.py
diff --git a/src/webui/grafana_prom_component_rpc.json b/src/webui/grafana_prom_component_rpc.json
new file mode 100644
index 0000000000000000000000000000000000000000..ce40c2854df2f71fe07601ca4fada945cab22fa6
--- /dev/null
+++ b/src/webui/grafana_prom_component_rpc.json
@@ -0,0 +1,427 @@
+{"overwrite": true, "folderId": 0, "dashboard":
+  {
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": "-- Grafana --",
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "gnetId": null,
+    "graphTooltip": 0,
+    "id": null,
+    "iteration": 1671297223428,
+    "links": [],
+    "panels": [
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 6,
+          "w": 24,
+          "x": 0,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 4,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "rightSide": false,
+          "show": false,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "nullPointMode": "null",
+        "options": {
+          "alertThreshold": true
+        },
+        "percentage": false,
+        "pluginVersion": "7.5.4",
+        "pointradius": 2,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_[[component]]_rpc_[[method]]_counter_requests_started_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "started",
+            "refId": "A"
+          },
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_[[component]]_rpc_[[method]]_counter_requests_completed_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "completed",
+            "refId": "B"
+          },
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_[[component]]_rpc_[[method]]_counter_requests_failed_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "failed",
+            "refId": "C"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Requests",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "transformations": [],
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "$$hashKey": "object:935",
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "$$hashKey": "object:936",
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "cards": {
+          "cardPadding": null,
+          "cardRound": null
+        },
+        "color": {
+          "cardColor": "#b4ff00",
+          "colorScale": "linear",
+          "colorScheme": "interpolateRdYlGn",
+          "exponent": 0.5,
+          "max": null,
+          "min": 0,
+          "mode": "opacity"
+        },
+        "dataFormat": "tsbuckets",
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "gridPos": {
+          "h": 8,
+          "w": 24,
+          "x": 0,
+          "y": 6
+        },
+        "heatmap": {},
+        "hideZeroBuckets": true,
+        "highlightCards": true,
+        "id": 2,
+        "interval": "60s",
+        "legend": {
+          "show": true
+        },
+        "pluginVersion": "7.5.4",
+        "reverseYBuckets": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(\r\n    max_over_time(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket{pod=~\"[[pod]]\"}[1m]) -\r\n    min_over_time(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket{pod=~\"[[pod]]\"}[1m])\r\n) by (le)",
+            "format": "heatmap",
+            "instant": false,
+            "interval": "1m",
+            "intervalFactor": 1,
+            "legendFormat": "{{le}}",
+            "refId": "A"
+          }
+        ],
+        "title": "Histogram",
+        "tooltip": {
+          "show": true,
+          "showHistogram": true
+        },
+        "type": "heatmap",
+        "xAxis": {
+          "show": true
+        },
+        "xBucketNumber": null,
+        "xBucketSize": null,
+        "yAxis": {
+          "decimals": null,
+          "format": "s",
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true,
+          "splitFactor": null
+        },
+        "yBucketBound": "auto",
+        "yBucketNumber": null,
+        "yBucketSize": null
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 6,
+          "w": 24,
+          "x": 0,
+          "y": 14
+        },
+        "hiddenSeries": false,
+        "id": 5,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "rightSide": false,
+          "show": false,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "nullPointMode": "null",
+        "options": {
+          "alertThreshold": true
+        },
+        "percentage": false,
+        "pluginVersion": "7.5.4",
+        "pointradius": 2,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_[[component]]_rpc_[[method]]_histogram_duration_sum{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "total time",
+            "refId": "B"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Total Exec Time",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "transformations": [],
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "$$hashKey": "object:407",
+            "format": "s",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "$$hashKey": "object:408",
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 27,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": null,
+          "current": {
+            "selected": false,
+            "text": "context",
+            "value": "context"
+          },
+          "datasource": "prometheus",
+          "definition": "metrics(tfs_)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": false,
+          "label": "Component",
+          "multi": false,
+          "name": "component",
+          "options": [],
+          "query": {
+            "query": "metrics(tfs_)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "/tfs_(.+)_rpc_.*/",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": "",
+          "current": {
+            "selected": false,
+            "text": "getcontext",
+            "value": "getcontext"
+          },
+          "datasource": "prometheus",
+          "definition": "metrics(tfs_[[component]]_rpc_)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": false,
+          "label": "Method",
+          "multi": false,
+          "name": "method",
+          "options": [],
+          "query": {
+            "query": "metrics(tfs_[[component]]_rpc_)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "/tfs_[[component]]_rpc_(.+)_histogram_duration_bucket/",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": "prometheus",
+          "definition": "label_values(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket, pod)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": true,
+          "label": "Pod",
+          "multi": true,
+          "name": "pod",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket, pod)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        }
+      ]
+    },
+    "time": {
+      "from": "now-15m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "TFS / Component RPCs",
+    "uid": "tfs-comp-rpc",
+    "version": 21
+  }
+}
diff --git a/src/webui/grafana_prom_device_config_exec_details.json b/src/webui/grafana_prom_device_config_exec_details.json
new file mode 100644
index 0000000000000000000000000000000000000000..4b29a8dcaa99d0527e188790c2a1ff9ca000738a
--- /dev/null
+++ b/src/webui/grafana_prom_device_config_exec_details.json
@@ -0,0 +1,184 @@
+{"overwrite": true, "folderId": 0, "dashboard":
+  {
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": "-- Grafana --",
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "gnetId": null,
+    "graphTooltip": 0,
+    "id": null,
+    "iteration": 1682003744753,
+    "links": [],
+    "panels": [
+      {
+        "cards": {
+          "cardPadding": null,
+          "cardRound": null
+        },
+        "color": {
+          "cardColor": "#b4ff00",
+          "colorScale": "linear",
+          "colorScheme": "interpolateRdYlGn",
+          "exponent": 0.5,
+          "max": null,
+          "min": 0,
+          "mode": "opacity"
+        },
+        "dataFormat": "tsbuckets",
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "gridPos": {
+          "h": 22,
+          "w": 24,
+          "x": 0,
+          "y": 0
+        },
+        "heatmap": {},
+        "hideZeroBuckets": true,
+        "highlightCards": true,
+        "id": 2,
+        "interval": "60s",
+        "legend": {
+          "show": true
+        },
+        "pluginVersion": "7.5.4",
+        "reverseYBuckets": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(\r\n    max_over_time(tfs_device_exec_details_configuredevice_histogram_duration_bucket{pod=~\"[[pod]]\", step_name=~\"[[step_name]]\"}[1m]) -\r\n    min_over_time(tfs_device_exec_details_configuredevice_histogram_duration_bucket{pod=~\"[[pod]]\", step_name=~\"[[step_name]]\"}[1m])\r\n) by (le)",
+            "format": "heatmap",
+            "instant": false,
+            "interval": "1m",
+            "intervalFactor": 1,
+            "legendFormat": "{{le}}",
+            "refId": "A"
+          }
+        ],
+        "title": "Histogram",
+        "tooltip": {
+          "show": true,
+          "showHistogram": true
+        },
+        "type": "heatmap",
+        "xAxis": {
+          "show": true
+        },
+        "xBucketNumber": null,
+        "xBucketSize": null,
+        "yAxis": {
+          "decimals": null,
+          "format": "s",
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true,
+          "splitFactor": null
+        },
+        "yBucketBound": "auto",
+        "yBucketNumber": null,
+        "yBucketSize": null
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 27,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": "prometheus",
+          "definition": "label_values(tfs_device_exec_details_configuredevice_histogram_duration_bucket, pod)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": true,
+          "label": "Pod",
+          "multi": true,
+          "name": "pod",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_device_exec_details_configuredevice_histogram_duration_bucket, pod)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": "prometheus",
+          "definition": "label_values(tfs_device_exec_details_configuredevice_histogram_duration_bucket, step_name)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": true,
+          "label": "Step Name",
+          "multi": true,
+          "name": "step_name",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_device_exec_details_configuredevice_histogram_duration_bucket, step_name)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        }
+      ]
+    },
+    "time": {
+      "from": "now-15m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "TFS / ConfigureDevice Details",
+    "uid": "tfs-dev-confdev",
+    "version": 4
+  }
+}
diff --git a/src/webui/grafana_prom_device_driver.json b/src/webui/grafana_prom_device_driver.json
new file mode 100644
index 0000000000000000000000000000000000000000..af4ccca88a905b7ebb25ca0e23506b7b618011ad
--- /dev/null
+++ b/src/webui/grafana_prom_device_driver.json
@@ -0,0 +1,432 @@
+{"overwrite": true, "folderId": 0, "dashboard":
+  {
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": "-- Grafana --",
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "gnetId": null,
+    "graphTooltip": 0,
+    "id": null,
+    "iteration": 1671318718779,
+    "links": [],
+    "panels": [
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 6,
+          "w": 24,
+          "x": 0,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 4,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "rightSide": false,
+          "show": false,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "nullPointMode": "null",
+        "options": {
+          "alertThreshold": true
+        },
+        "percentage": false,
+        "pluginVersion": "7.5.4",
+        "pointradius": 2,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_device_driver_[[method]]_counter_requests_started_total{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "started",
+            "refId": "A"
+          },
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_device_driver_[[method]]_counter_requests_completed_total{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "completed",
+            "refId": "B"
+          },
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_device_driver_[[method]]_counter_requests_failed_total{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "failed",
+            "refId": "C"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Requests",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "transformations": [],
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "$$hashKey": "object:864",
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "$$hashKey": "object:865",
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "cards": {
+          "cardPadding": null,
+          "cardRound": null
+        },
+        "color": {
+          "cardColor": "#b4ff00",
+          "colorScale": "linear",
+          "colorScheme": "interpolateRdYlGn",
+          "exponent": 0.5,
+          "max": null,
+          "min": 0,
+          "mode": "opacity"
+        },
+        "dataFormat": "tsbuckets",
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "gridPos": {
+          "h": 8,
+          "w": 24,
+          "x": 0,
+          "y": 6
+        },
+        "heatmap": {},
+        "hideZeroBuckets": true,
+        "highlightCards": true,
+        "id": 2,
+        "interval": "60s",
+        "legend": {
+          "show": true
+        },
+        "pluginVersion": "7.5.4",
+        "reverseYBuckets": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(\r\n    max_over_time(tfs_device_driver_[[method]]_histogram_duration_bucket{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"}[1m]) -\r\n    min_over_time(tfs_device_driver_[[method]]_histogram_duration_bucket{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"}[1m])\r\n) by (le)",
+            "format": "heatmap",
+            "instant": false,
+            "interval": "60s",
+            "intervalFactor": 1,
+            "legendFormat": "{{le}}",
+            "refId": "A"
+          }
+        ],
+        "timeFrom": null,
+        "title": "Histogram",
+        "tooltip": {
+          "show": true,
+          "showHistogram": true
+        },
+        "type": "heatmap",
+        "xAxis": {
+          "show": true
+        },
+        "xBucketNumber": null,
+        "xBucketSize": null,
+        "yAxis": {
+          "decimals": null,
+          "format": "s",
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true,
+          "splitFactor": null
+        },
+        "yBucketBound": "auto",
+        "yBucketNumber": null,
+        "yBucketSize": null
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 6,
+          "w": 24,
+          "x": 0,
+          "y": 14
+        },
+        "hiddenSeries": false,
+        "id": 5,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "rightSide": false,
+          "show": false,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "nullPointMode": "null",
+        "options": {
+          "alertThreshold": true
+        },
+        "percentage": false,
+        "pluginVersion": "7.5.4",
+        "pointradius": 2,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_device_driver_[[method]]_histogram_duration_sum{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "total time",
+            "refId": "B"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Total Exec Time",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "transformations": [],
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "$$hashKey": "object:407",
+            "format": "s",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "$$hashKey": "object:408",
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 27,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": "",
+          "current": {
+            "selected": false,
+            "text": "setconfig",
+            "value": "setconfig"
+          },
+          "datasource": "prometheus",
+          "definition": "metrics(tfs_device_driver_.+)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": false,
+          "label": "Method",
+          "multi": false,
+          "name": "method",
+          "options": [],
+          "query": {
+            "query": "metrics(tfs_device_driver_.+)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "/tfs_device_driver_(.+config)_histogram_duration_bucket/",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": "prometheus",
+          "definition": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, driver)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": true,
+          "label": "Driver",
+          "multi": true,
+          "name": "driver",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, driver)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": "prometheus",
+          "definition": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, pod)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": true,
+          "label": "Pod",
+          "multi": true,
+          "name": "pod",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, pod)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "/deviceservice-(.*)/",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        }
+      ]
+    },
+    "time": {
+      "from": "now-15m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "TFS / Device / Driver",
+    "uid": "tfs-dev-drv",
+    "version": 30
+  }
+}
diff --git a/src/webui/grafana_prom_load_generator.json b/src/webui/grafana_prom_load_generator.json
new file mode 100644
index 0000000000000000000000000000000000000000..efdc8a1180e0d44f726becf5c8125c34779f7c78
--- /dev/null
+++ b/src/webui/grafana_prom_load_generator.json
@@ -0,0 +1,399 @@
+{"overwrite": true, "folderId": 0, "dashboard":
+  {
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": {
+            "type": "datasource",
+            "uid": "grafana"
+          },
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "target": {
+            "limit": 100,
+            "matchAny": false,
+            "tags": [],
+            "type": "dashboard"
+          },
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "fiscalYearStartMonth": 0,
+    "graphTooltip": 0,
+    "id": null,
+    "iteration": 1682528742676,
+    "links": [],
+    "liveNow": false,
+    "panels": [
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": {
+          "type": "prometheus",
+          "uid": "prometheus"
+        },
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 6,
+          "w": 24,
+          "x": 0,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 4,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "rightSide": false,
+          "show": false,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "nullPointMode": "null",
+        "options": {
+          "alertThreshold": true
+        },
+        "percentage": false,
+        "pluginVersion": "8.5.22",
+        "pointradius": 2,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "datasource": {
+              "type": "prometheus",
+              "uid": "prometheus"
+            },
+            "exemplar": true,
+            "expr": "sum(tfs_loadgen_requests_[[method]]_counter_requests_started_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "started",
+            "refId": "A"
+          },
+          {
+            "datasource": {
+              "type": "prometheus",
+              "uid": "prometheus"
+            },
+            "exemplar": true,
+            "expr": "sum(tfs_loadgen_requests_[[method]]_counter_requests_completed_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "completed",
+            "refId": "B"
+          },
+          {
+            "datasource": {
+              "type": "prometheus",
+              "uid": "prometheus"
+            },
+            "exemplar": true,
+            "expr": "sum(tfs_loadgen_requests_[[method]]_counter_requests_failed_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "failed",
+            "refId": "C"
+          },
+          {
+            "datasource": {
+              "type": "prometheus",
+              "uid": "prometheus"
+            },
+            "editorMode": "code",
+            "exemplar": true,
+            "expr": "sum(tfs_loadgen_requests_[[method]]_counter_requests_blocked_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "blocked",
+            "range": true,
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeRegions": [],
+        "title": "Requests",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "transformations": [],
+        "type": "graph",
+        "xaxis": {
+          "mode": "time",
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "$$hashKey": "object:935",
+            "format": "short",
+            "logBase": 1,
+            "min": "0",
+            "show": true
+          },
+          {
+            "$$hashKey": "object:936",
+            "format": "short",
+            "logBase": 1,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false
+        }
+      },
+      {
+        "cards": {},
+        "color": {
+          "cardColor": "#b4ff00",
+          "colorScale": "linear",
+          "colorScheme": "interpolateRdYlGn",
+          "exponent": 0.5,
+          "min": 0,
+          "mode": "opacity"
+        },
+        "dataFormat": "tsbuckets",
+        "datasource": {
+          "type": "prometheus",
+          "uid": "prometheus"
+        },
+        "gridPos": {
+          "h": 8,
+          "w": 24,
+          "x": 0,
+          "y": 6
+        },
+        "heatmap": {},
+        "hideZeroBuckets": true,
+        "highlightCards": true,
+        "id": 2,
+        "interval": "60s",
+        "legend": {
+          "show": true
+        },
+        "pluginVersion": "7.5.4",
+        "reverseYBuckets": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(\r\n    max_over_time(tfs_loadgen_requests_[[method]]_histogram_duration_bucket{pod=~\"[[pod]]\"}[1m]) -\r\n    min_over_time(tfs_loadgen_requests_[[method]]_histogram_duration_bucket{pod=~\"[[pod]]\"}[1m])\r\n) by (le)",
+            "format": "heatmap",
+            "instant": false,
+            "interval": "1m",
+            "intervalFactor": 1,
+            "legendFormat": "{{le}}",
+            "refId": "A"
+          }
+        ],
+        "title": "Histogram",
+        "tooltip": {
+          "show": true,
+          "showHistogram": true
+        },
+        "type": "heatmap",
+        "xAxis": {
+          "show": true
+        },
+        "yAxis": {
+          "format": "s",
+          "logBase": 1,
+          "show": true
+        },
+        "yBucketBound": "auto"
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": {
+          "type": "prometheus",
+          "uid": "prometheus"
+        },
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 6,
+          "w": 24,
+          "x": 0,
+          "y": 14
+        },
+        "hiddenSeries": false,
+        "id": 5,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "rightSide": false,
+          "show": false,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "nullPointMode": "null",
+        "options": {
+          "alertThreshold": true
+        },
+        "percentage": false,
+        "pluginVersion": "8.5.22",
+        "pointradius": 2,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_loadgen_requests_[[method]]_histogram_duration_sum{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "total time",
+            "refId": "B"
+          }
+        ],
+        "thresholds": [],
+        "timeRegions": [],
+        "title": "Total Exec Time",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "transformations": [],
+        "type": "graph",
+        "xaxis": {
+          "mode": "time",
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "$$hashKey": "object:407",
+            "format": "s",
+            "logBase": 1,
+            "min": "0",
+            "show": true
+          },
+          {
+            "$$hashKey": "object:408",
+            "format": "short",
+            "logBase": 1,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false
+        }
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 36,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": false,
+            "text": "setup",
+            "value": "setup"
+          },
+          "datasource": {
+            "type": "prometheus",
+            "uid": "prometheus"
+          },
+          "definition": "metrics(tfs_loadgen_requests_)",
+          "hide": 0,
+          "includeAll": false,
+          "label": "Method",
+          "multi": false,
+          "name": "method",
+          "options": [],
+          "query": {
+            "query": "metrics(tfs_loadgen_requests_)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "/tfs_loadgen_requests_(.+)_histogram_duration_bucket/",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": {
+            "type": "prometheus",
+            "uid": "prometheus"
+          },
+          "definition": "label_values(tfs_loadgen_requests_[[method]]_histogram_duration_bucket, pod)",
+          "hide": 0,
+          "includeAll": true,
+          "label": "Pod",
+          "multi": true,
+          "name": "pod",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_loadgen_requests_[[method]]_histogram_duration_bucket, pod)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        }
+      ]
+    },
+    "time": {
+      "from": "now-15m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "TFS / Load Generator Status",
+    "uid": "tfs-loadgen-stats",
+    "version": 3,
+    "weekStart": ""
+  }
+}
diff --git a/src/webui/grafana_prom_service_handler.json b/src/webui/grafana_prom_service_handler.json
new file mode 100644
index 0000000000000000000000000000000000000000..86f4b13d6f654af7d1f32f865ae2f6f508b7296c
--- /dev/null
+++ b/src/webui/grafana_prom_service_handler.json
@@ -0,0 +1,432 @@
+{"overwrite": true, "folderId": 0, "dashboard":
+  {
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": "-- Grafana --",
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "gnetId": null,
+    "graphTooltip": 0,
+    "id": null,
+    "iteration": 1671319012315,
+    "links": [],
+    "panels": [
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 6,
+          "w": 24,
+          "x": 0,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 4,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "rightSide": false,
+          "show": false,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "nullPointMode": "null",
+        "options": {
+          "alertThreshold": true
+        },
+        "percentage": false,
+        "pluginVersion": "7.5.4",
+        "pointradius": 2,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_service_handler_[[method]]_counter_requests_started_total{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "started",
+            "refId": "A"
+          },
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_service_handler_[[method]]_counter_requests_completed_total{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "completed",
+            "refId": "B"
+          },
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_service_handler_[[method]]_counter_requests_failed_total{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "failed",
+            "refId": "C"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Requests",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "transformations": [],
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "$$hashKey": "object:935",
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "$$hashKey": "object:936",
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "cards": {
+          "cardPadding": null,
+          "cardRound": null
+        },
+        "color": {
+          "cardColor": "#b4ff00",
+          "colorScale": "linear",
+          "colorScheme": "interpolateRdYlGn",
+          "exponent": 0.5,
+          "max": null,
+          "min": 0,
+          "mode": "opacity"
+        },
+        "dataFormat": "tsbuckets",
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "gridPos": {
+          "h": 8,
+          "w": 24,
+          "x": 0,
+          "y": 6
+        },
+        "heatmap": {},
+        "hideZeroBuckets": true,
+        "highlightCards": true,
+        "id": 2,
+        "interval": "60s",
+        "legend": {
+          "show": true
+        },
+        "pluginVersion": "7.5.4",
+        "reverseYBuckets": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(\r\n    max_over_time(tfs_service_handler_[[method]]_histogram_duration_bucket{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"}[1m]) -\r\n    min_over_time(tfs_service_handler_[[method]]_histogram_duration_bucket{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"}[1m])\r\n) by (le)",
+            "format": "heatmap",
+            "instant": false,
+            "interval": "1m",
+            "intervalFactor": 1,
+            "legendFormat": "{{le}}",
+            "refId": "A"
+          }
+        ],
+        "timeFrom": null,
+        "title": "Histogram",
+        "tooltip": {
+          "show": true,
+          "showHistogram": true
+        },
+        "type": "heatmap",
+        "xAxis": {
+          "show": true
+        },
+        "xBucketNumber": null,
+        "xBucketSize": null,
+        "yAxis": {
+          "decimals": null,
+          "format": "s",
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true,
+          "splitFactor": null
+        },
+        "yBucketBound": "auto",
+        "yBucketNumber": null,
+        "yBucketSize": null
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 6,
+          "w": 24,
+          "x": 0,
+          "y": 14
+        },
+        "hiddenSeries": false,
+        "id": 5,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "rightSide": false,
+          "show": false,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "nullPointMode": "null",
+        "options": {
+          "alertThreshold": true
+        },
+        "percentage": false,
+        "pluginVersion": "7.5.4",
+        "pointradius": 2,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_service_handler_[[method]]_histogram_duration_sum{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "total time",
+            "refId": "B"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Total Exec Time",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "transformations": [],
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "$$hashKey": "object:407",
+            "format": "s",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "$$hashKey": "object:408",
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 27,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": "",
+          "current": {
+            "selected": false,
+            "text": "setendpoint",
+            "value": "setendpoint"
+          },
+          "datasource": "prometheus",
+          "definition": "metrics(tfs_service_handler_.+)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": false,
+          "label": "Method",
+          "multi": false,
+          "name": "method",
+          "options": [],
+          "query": {
+            "query": "metrics(tfs_service_handler_.+)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "/tfs_service_handler_(.+)_histogram_duration_bucket/",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": "prometheus",
+          "definition": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, handler)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": true,
+          "label": "Handler",
+          "multi": true,
+          "name": "handler",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, handler)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": "prometheus",
+          "definition": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, pod)",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": true,
+          "label": "Pod",
+          "multi": true,
+          "name": "pod",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, pod)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "/serviceservice-(.*)/",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        }
+      ]
+    },
+    "time": {
+      "from": "now-15m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "TFS / Service / Handler",
+    "uid": "tfs-svc-hdlr",
+    "version": 16
+  }
+}
diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py
index fca1071419b3b2b61739c2a0d1d8bfa45aba5119..e7f50ed42d19921b3423617f6860b5630e93adba 100644
--- a/src/webui/service/__init__.py
+++ b/src/webui/service/__init__.py
@@ -95,6 +95,9 @@ def create_app(use_config=None, web_app_root=None):
     from webui.service.link.routes import link              # pylint: disable=import-outside-toplevel
     app.register_blueprint(link)
 
+    from webui.service.policy.routes import policy          # pylint: disable=import-outside-toplevel
+    app.register_blueprint(policy)
+
     app.jinja_env.globals.update({              # pylint: disable=no-member
         'enumerate'           : enumerate,
         'json_to_list'        : json_to_list,
diff --git a/src/webui/service/device/forms.py b/src/webui/service/device/forms.py
index c6bacac9bc1723a020f3057fad9c9e8306c9dbca..24bc92b3a5a4aec4321c07b17830f6111be7176d 100644
--- a/src/webui/service/device/forms.py
+++ b/src/webui/service/device/forms.py
@@ -29,6 +29,7 @@ class AddDeviceForm(FlaskForm):
     device_drivers_ietf_network_topology = BooleanField('IETF_NETWORK_TOPOLOGY')
     device_drivers_onf_tr_352 = BooleanField('ONF_TR_352')
     device_drivers_xr = BooleanField('XR')
+    device_drivers_ietf_l2vpn = BooleanField('IETF_L2VPN')
     device_config_address = StringField('connect/address',default='127.0.0.1',validators=[DataRequired(), Length(min=5)])
     device_config_port = StringField('connect/port',default='0',validators=[DataRequired(), Length(min=1)])
     device_config_settings = TextAreaField('connect/settings',default='{}',validators=[DataRequired(), Length(min=2)])
diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py
index ebf77a35ffdf9c2546ddbdd1bac0c8c1f54a2b56..bc46847704b28fb6ef44de0aae030ccb67935928 100644
--- a/src/webui/service/device/routes.py
+++ b/src/webui/service/device/routes.py
@@ -120,6 +120,8 @@ def add():
             device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352)
         if form.device_drivers_xr.data:
             device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_XR)
+        if form.device_drivers_ietf_l2vpn.data:
+            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN)
         device_obj.device_drivers.extend(device_drivers) # pylint: disable=no-member
 
         try:
diff --git a/src/webui/service/load_gen/forms.py b/src/webui/service/load_gen/forms.py
index 4e0020b04f33152de382f5b93af9735f8d737f92..e0d11800cf9fbd9b0e195de7aa85eede272fe28e 100644
--- a/src/webui/service/load_gen/forms.py
+++ b/src/webui/service/load_gen/forms.py
@@ -14,11 +14,17 @@
 
 from flask_wtf import FlaskForm
 from wtforms import BooleanField, FloatField, IntegerField, StringField, SubmitField
-from wtforms.validators import DataRequired, NumberRange
+from wtforms.validators import DataRequired, NumberRange, Regexp
+from load_generator.tools.ListScalarRange import RE_SCALAR_RANGE_LIST
+
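+# Each default below uses the list-scalar-range syntax matched by RE_SCALAR_RANGE_LIST:
+# a comma-separated list of scalars and/or 'min..max' ranges, e.g. '10, 40, 50' or '0.1..100.0'.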
+DEFAULT_AVAILABILITY   = '0.0..99.9999'
+DEFAULT_CAPACITY_GBPS  = '0.1..100.00' #'10, 40, 50, 100, 400'
+DEFAULT_E2E_LATENCY_MS = '5.0..100.00'
 
 class LoadGenForm(FlaskForm):
     num_requests = IntegerField('Num Requests', default=100, validators=[DataRequired(), NumberRange(min=0)])
     num_generated = IntegerField('Num Generated', default=0, render_kw={'readonly': True})
+    num_released = IntegerField('Num Released', default=0, render_kw={'readonly': True})
 
     request_type_service_l2nm = BooleanField('Service L2NM', default=False)
     request_type_service_l3nm = BooleanField('Service L3NM', default=False)
@@ -31,6 +37,12 @@ class LoadGenForm(FlaskForm):
     holding_time = FloatField('Holding Time [seconds]', default=10, validators=[NumberRange(min=0.0)])
     inter_arrival_time = FloatField('Inter Arrival Time [seconds]', default=0, validators=[NumberRange(min=0.0)])
 
+    availability   = StringField('Availability [%]', default=DEFAULT_AVAILABILITY,   validators=[Regexp(RE_SCALAR_RANGE_LIST)])
+    capacity_gbps  = StringField('Capacity [Gbps]',  default=DEFAULT_CAPACITY_GBPS,  validators=[Regexp(RE_SCALAR_RANGE_LIST)])
+    e2e_latency_ms = StringField('E2E Latency [ms]', default=DEFAULT_E2E_LATENCY_MS, validators=[Regexp(RE_SCALAR_RANGE_LIST)])
+
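+    # Upper bound on the number of concurrent workers used to generate requests.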
+    max_workers = IntegerField('Max Workers', default=10, validators=[DataRequired(), NumberRange(min=1)])
+
     do_teardown = BooleanField('Do Teardown', default=True)
 
     record_to_dlt = BooleanField('Record to DLT', default=False)
diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py
index 5f47f06b0ff59ad1383aab94caa41adc08440c87..f05f57f6d5aab83c0752dda15e0b858c9a4d53a3 100644
--- a/src/webui/service/load_gen/routes.py
+++ b/src/webui/service/load_gen/routes.py
@@ -17,6 +17,8 @@ from flask import redirect, render_template, Blueprint, flash, url_for
 from common.proto.context_pb2 import Empty
 from common.proto.load_generator_pb2 import Parameters, RequestTypeEnum
 from load_generator.client.LoadGeneratorClient import LoadGeneratorClient
+from load_generator.tools.ListScalarRange import (
+    list_scalar_range__grpc_to_str, list_scalar_range__list_to_grpc, parse_list_scalar_range)
 from .forms import LoadGenForm
 
 load_gen = Blueprint('load_gen', __name__, url_prefix='/load_gen')
@@ -55,11 +57,19 @@ def home():
     _holding_time       = round(status.parameters.holding_time       , ndigits=4)
     _inter_arrival_time = round(status.parameters.inter_arrival_time , ndigits=4)
 
+    _availability       = list_scalar_range__grpc_to_str(status.parameters.availability  )
+    _capacity_gbps      = list_scalar_range__grpc_to_str(status.parameters.capacity_gbps )
+    _e2e_latency_ms     = list_scalar_range__grpc_to_str(status.parameters.e2e_latency_ms)
+
     form = LoadGenForm()
     set_properties(form.num_requests             , status.parameters.num_requests , readonly=status.running)
     set_properties(form.offered_load             , _offered_load                  , readonly=status.running)
     set_properties(form.holding_time             , _holding_time                  , readonly=status.running)
     set_properties(form.inter_arrival_time       , _inter_arrival_time            , readonly=status.running)
+    set_properties(form.availability             , _availability                  , readonly=status.running)
+    set_properties(form.capacity_gbps            , _capacity_gbps                 , readonly=status.running)
+    set_properties(form.e2e_latency_ms           , _e2e_latency_ms                , readonly=status.running)
+    set_properties(form.max_workers              , status.parameters.max_workers  , readonly=status.running)
     set_properties(form.do_teardown              , status.parameters.do_teardown  , disabled=status.running)
     set_properties(form.record_to_dlt            , status.parameters.record_to_dlt, disabled=status.running)
     set_properties(form.dlt_domain_id            , status.parameters.dlt_domain_id, readonly=status.running)
@@ -70,6 +80,7 @@ def home():
     set_properties(form.request_type_slice_l2nm  , _request_type_slice_l2nm       , disabled=status.running)
     set_properties(form.request_type_slice_l3nm  , _request_type_slice_l3nm       , disabled=status.running)
     set_properties(form.num_generated            , status.num_generated           , disabled=True)
+    set_properties(form.num_released             , status.num_released            , disabled=True)
     set_properties(form.infinite_loop            , status.infinite_loop           , disabled=True)
     set_properties(form.running                  , status.running                 , disabled=True)
 
@@ -82,16 +93,25 @@ def start():
     form = LoadGenForm()
     if form.validate_on_submit():
         try:
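+            # Parse the free-text availability/capacity/latency fields into scalar-range lists.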
+            _availability   = parse_list_scalar_range(form.availability.data  )
+            _capacity_gbps  = parse_list_scalar_range(form.capacity_gbps.data )
+            _e2e_latency_ms = parse_list_scalar_range(form.e2e_latency_ms.data)
+
             load_gen_params = Parameters()
             load_gen_params.num_requests       = form.num_requests.data
             load_gen_params.offered_load       = form.offered_load.data
             load_gen_params.holding_time       = form.holding_time.data
             load_gen_params.inter_arrival_time = form.inter_arrival_time.data
+            load_gen_params.max_workers        = form.max_workers.data
             load_gen_params.do_teardown        = form.do_teardown.data
             load_gen_params.dry_mode           = False
             load_gen_params.record_to_dlt      = form.record_to_dlt.data
             load_gen_params.dlt_domain_id      = form.dlt_domain_id.data
 
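+            # Copy the parsed scalar-range lists into the corresponding gRPC repeated fields.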
+            list_scalar_range__list_to_grpc(_availability,   load_gen_params.availability  ) # pylint: disable=no-member
+            list_scalar_range__list_to_grpc(_capacity_gbps,  load_gen_params.capacity_gbps ) # pylint: disable=no-member
+            list_scalar_range__list_to_grpc(_e2e_latency_ms, load_gen_params.e2e_latency_ms) # pylint: disable=no-member
+
             del load_gen_params.request_types[:] # pylint: disable=no-member
             request_types = list()
             if form.request_type_service_l2nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM)
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index 32cefddf3b2a8251623b60fd9fc039588cd6b9bb..75f036befd4bed3bb3bd743b9f423bf21c014e55 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -131,25 +131,18 @@ def topology():
         topology_uuid = session['topology_uuid']
 
         json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))
-        grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id))
+        response = context_client.GetTopologyDetails(TopologyId(**json_topo_id))
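+        # GetTopologyDetails returns the topology's devices and links in a single RPC,
+        # removing the need for the separate ListDevices/ListLinks calls and UUID filtering.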
 
-        topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids}
-        topo_link_uuids   = {link_id  .link_uuid  .uuid for link_id   in grpc_topology.link_ids  }
-
-        response = context_client.ListDevices(Empty())
         devices = []
         for device in response.devices:
-            if device.device_id.device_uuid.uuid not in topo_device_uuids: continue
             devices.append({
                 'id': device.device_id.device_uuid.uuid,
                 'name': device.name,
                 'type': device.device_type,
             })
 
-        response = context_client.ListLinks(Empty())
         links = []
         for link in response.links:
-            if link.link_id.link_uuid.uuid not in topo_link_uuids: continue
             if len(link.link_endpoint_ids) != 2:
                 str_link = grpc_message_to_json_string(link)
                 LOGGER.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link))
diff --git a/src/webui/service/policy/__init__.py b/src/webui/service/policy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/webui/service/policy/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/webui/service/policy/routes.py b/src/webui/service/policy/routes.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d14f86b4f1428695b474b3f2e2dd4dc72657452
--- /dev/null
+++ b/src/webui/service/policy/routes.py
@@ -0,0 +1,50 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from flask import render_template, Blueprint
+from common.proto.context_pb2 import Empty
+from common.proto.policy_pb2 import PolicyRuleStateEnum
+from context.client.ContextClient import ContextClient
+
+policy = Blueprint('policy', __name__, url_prefix='/policy')
+
+context_client = ContextClient()
+
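+# List all policy rules currently stored by the Context component.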
+@policy.get('/')
+def home():
+    context_client.connect()
+    policy_rules = context_client.ListPolicyRules(Empty())
+    policy_rules = policy_rules.policyRules
+    context_client.close()
+    return render_template('policy/home.html', policies=policy_rules, prse=PolicyRuleStateEnum)
+
+#@policy.get('<path:policy_uuid>/detail')
+#def detail(policy_uuid: str):
+#    # Placeholder adapted from the slice detail page. get_policy_rule_by_uuid is a
+#    # hypothetical helper; extra flask imports and policy/detail.html are needed before enabling.
+#    try:
+#        context_client.connect()
+#
+#        policy_rule = get_policy_rule_by_uuid(context_client, policy_uuid)
+#        if policy_rule is None:
+#            flash('PolicyRule({:s}) not found'.format(str(policy_uuid)), 'danger')
+#
+#        context_client.close()
+#
+#        return render_template('policy/detail.html', policy_rule=policy_rule, prse=PolicyRuleStateEnum)
+#    except Exception as e:
+#        flash('The system encountered an error and cannot show the details of this policy rule.', 'warning')
+#        current_app.logger.exception(e)
+#        return redirect(url_for('policy.home'))
diff --git a/src/webui/service/service/routes.py b/src/webui/service/service/routes.py
index 70a5b5bad41df6520cb2facdad94cfee04f726cd..08312e5257d13c4b55b83733ded689c7565c4790 100644
--- a/src/webui/service/service/routes.py
+++ b/src/webui/service/service/routes.py
@@ -18,17 +18,18 @@ import grpc
 from collections import defaultdict
 from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for, request
 from common.proto.context_pb2 import (
-    IsolationLevelEnum, Service, ServiceId, ServiceTypeEnum, ServiceStatusEnum, Connection, Empty, DeviceDriverEnum, ConfigActionEnum, Device, DeviceList)
+    IsolationLevelEnum, Service, ServiceId, ServiceTypeEnum, ServiceStatusEnum, Connection, Empty, DeviceDriverEnum,
+    ConfigActionEnum, Device, DeviceList)
 from common.tools.context_queries.Context import get_context
 from common.tools.context_queries.Topology import get_topology
 from common.tools.context_queries.EndPoint import get_endpoint_names
-from common.tools.context_queries.Service import get_service
+from common.tools.context_queries.Service import get_service_by_uuid
+from common.tools.object_factory.ConfigRule import json_config_rule_set
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from service.client.ServiceClient import ServiceClient
 from typing import Optional, Set
-from common.tools.object_factory.Topology import json_topology_id
-from common.tools.object_factory.ConfigRule import json_config_rule_set
-from common.tools.object_factory.Context import json_context_id
 
 service = Blueprint('service', __name__, url_prefix='/service')
 
@@ -254,7 +255,7 @@ def detail(service_uuid: str):
         context_client.connect()
 
         endpoint_ids = list()
-        service_obj = get_service(context_client, service_uuid, rw_copy=False)
+        service_obj = get_service_by_uuid(context_client, service_uuid, rw_copy=False)
         if service_obj is None:
             flash('Context({:s})/Service({:s}) not found'.format(str(context_uuid), str(service_uuid)), 'danger')
             service_obj = Service()
diff --git a/src/webui/service/slice/routes.py b/src/webui/service/slice/routes.py
index cd1b672d5c1014b0e8aa301ed7b5a1f6d910f6df..a66b316b8f1e2e3266ce37fa8f55f4f8a1042ca1 100644
--- a/src/webui/service/slice/routes.py
+++ b/src/webui/service/slice/routes.py
@@ -17,7 +17,7 @@ from flask import current_app, redirect, render_template, Blueprint, flash, sess
 from common.proto.context_pb2 import IsolationLevelEnum, Slice, SliceId, SliceStatusEnum
 from common.tools.context_queries.Context import get_context
 from common.tools.context_queries.EndPoint import get_endpoint_names
-from common.tools.context_queries.Slice import get_slice
+from common.tools.context_queries.Slice import get_slice_by_uuid
 from context.client.ContextClient import ContextClient
 from slice.client.SliceClient import SliceClient
 
@@ -76,7 +76,7 @@ def detail(slice_uuid: str):
     try:
         context_client.connect()
 
-        slice_obj = get_slice(context_client, slice_uuid, rw_copy=False)
+        slice_obj = get_slice_by_uuid(context_client, slice_uuid, rw_copy=False)
         if slice_obj is None:
             flash('Context({:s})/Slice({:s}) not found'.format(str(context_uuid), str(slice_uuid)), 'danger')
             slice_obj = Slice()
diff --git a/src/webui/service/static/topology_icons/Acknowledgements.txt b/src/webui/service/static/topology_icons/Acknowledgements.txt
index b285d225957b0a4e8c14ac4ae5e078597d2a1b27..de69c89cee25dc93856761ac2dd08d9988a35095 100644
--- a/src/webui/service/static/topology_icons/Acknowledgements.txt
+++ b/src/webui/service/static/topology_icons/Acknowledgements.txt
@@ -24,3 +24,10 @@ https://symbols.getvecta.com/stencil_241/213_programmable-sw.32d3794d56.png => e
 
 https://symbols.getvecta.com/stencil_240/275_wae.c06b769cd7.png => optical-transponder.png
 https://symbols.getvecta.com/stencil_241/289_wae.216d930c17.png => emu-optical-transponder.png
+
+https://symbols.getvecta.com/stencil_240/128_localdirector.c1e561769f.png => optical-splitter.png
+https://symbols.getvecta.com/stencil_241/158_local-director.6b38eab9e4.png => emu-optical-splitter.png
+
+
+https://symbols.getvecta.com/stencil_240/197_radio-tower.b6138c8c29.png => packet-radio-router.png
+https://symbols.getvecta.com/stencil_241/216_radio-tower.5159339bc0.png => emu-packet-radio-router.png
\ No newline at end of file
diff --git a/src/webui/service/static/topology_icons/emu-optical-splitter.png b/src/webui/service/static/topology_icons/emu-optical-splitter.png
new file mode 100644
index 0000000000000000000000000000000000000000..12b7727d68ef749b52fcdd592c0427f63b58dc75
Binary files /dev/null and b/src/webui/service/static/topology_icons/emu-optical-splitter.png differ
diff --git a/src/webui/service/static/topology_icons/emu-packet-radio-router.png b/src/webui/service/static/topology_icons/emu-packet-radio-router.png
new file mode 100644
index 0000000000000000000000000000000000000000..00257d0e2ee357dbdd392a408cfdbe07e006ff2a
Binary files /dev/null and b/src/webui/service/static/topology_icons/emu-packet-radio-router.png differ
diff --git a/src/webui/service/static/topology_icons/emu-xr-constellation.png b/src/webui/service/static/topology_icons/emu-xr-constellation.png
new file mode 100644
index 0000000000000000000000000000000000000000..d3bea498a4cd6d8a455d997e4833079f3e6b714f
Binary files /dev/null and b/src/webui/service/static/topology_icons/emu-xr-constellation.png differ
diff --git a/src/webui/service/static/topology_icons/optical-splitter.png b/src/webui/service/static/topology_icons/optical-splitter.png
new file mode 100644
index 0000000000000000000000000000000000000000..90a3d79b8ed4b8ae15f3d4a349cd08d741dcfdaf
Binary files /dev/null and b/src/webui/service/static/topology_icons/optical-splitter.png differ
diff --git a/src/webui/service/static/topology_icons/packet-radio-router.png b/src/webui/service/static/topology_icons/packet-radio-router.png
new file mode 100644
index 0000000000000000000000000000000000000000..025172a587890341061b11ae57ce30184b8bc2f0
Binary files /dev/null and b/src/webui/service/static/topology_icons/packet-radio-router.png differ
diff --git a/src/webui/service/static/topology_icons/teraflowsdn.png b/src/webui/service/static/topology_icons/teraflowsdn.png
new file mode 100644
index 0000000000000000000000000000000000000000..ed2232e8223a39eb0d829e0e50975a697b0660fc
Binary files /dev/null and b/src/webui/service/static/topology_icons/teraflowsdn.png differ
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index 1dfa3687198d8a33db346ba2bbcd2989f6f109bb..61c283b0d957b4d13b7cc57e47d3ea2675ab76f0 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -83,6 +83,14 @@
                   <a class="nav-link" href="{{ url_for('slice.home') }}">Slice</a>
                   {% endif %}
                 </li>
+                <li class="nav-item">
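+                  {# mark the Policy tab as active while browsing /policy/ pages #}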
+                  {% if '/policy/' in request.path %}
+                  <a class="nav-link active" aria-current="page" href="{{ url_for('policy.home') }}">Policy</a>
+                  {% else %}
+                  <a class="nav-link" href="{{ url_for('policy.home') }}">Policy</a>
+                  {% endif %}
+                </li>
                 <li class="nav-item">
                   <a class="nav-link" href="/grafana" id="grafana_link" target="grafana">Grafana</a>
                 </li>
diff --git a/src/webui/service/templates/js/topology.js b/src/webui/service/templates/js/topology.js
index 50486d2a6826fedace55f7a62592fa083e7256a6..1b34f2b2c737c96ef18e925bd76ef28451f4120f 100644
--- a/src/webui/service/templates/js/topology.js
+++ b/src/webui/service/templates/js/topology.js
@@ -31,8 +31,8 @@ const margin = {top: 5, right: 5, bottom: 5, left: 5};
 const icon_width  = 40;
 const icon_height = 40;
 
-width = 1000 - margin.left - margin.right;
-height = 600 - margin.top - margin.bottom;
+width = 1400 - margin.left - margin.right;
+height = 800 - margin.top - margin.bottom;
 
 //function handleZoom(e) {
 //    console.dir(e);
@@ -70,11 +70,22 @@ var simulation = d3.forceSimulation();
 // load the data
 d3.json("{{ url_for('main.topology') }}", function(data) {
     // set the data and properties of link lines and node circles
-    link = svg.append("g").attr("class", "links").style('stroke', '#aaa')
+    link = svg.append("g").attr("class", "links")//.style('stroke', '#aaa')
         .selectAll("line")
         .data(data.links)
         .enter()
-        .append("line");
+        .append("line")
+        .attr("opacity", 1)
+        .attr("stroke", function(l) {
+            return l.name.toLowerCase().includes('mgmt') ? '#AAAAAA' : '#555555';
+        })
+        .attr("stroke-width", function(l) {
+            return l.name.toLowerCase().includes('mgmt') ? 1 : 2;
+        })
+        .attr("stroke-dasharray", function(l) {
+            return l.name.toLowerCase().includes('mgmt') ? "5,5" : "0";
+        });
     node = svg.append("g").attr("class", "devices").attr('r', 20).style('fill', '#69b3a2')
         .selectAll("circle")
         .data(data.devices)
@@ -93,9 +103,9 @@ d3.json("{{ url_for('main.topology') }}", function(data) {
     link.append("title").text(function(l) { return l.name; });
 
     // link style
-    link
-        .attr("stroke-width", forceProperties.link.enabled ? 2 : 1)
-        .attr("opacity", forceProperties.link.enabled ? 1 : 0);
+    //link
+    //    .attr("stroke-width", forceProperties.link.enabled ? 2 : 1)
+    //    .attr("opacity", forceProperties.link.enabled ? 1 : 0);
     
     // set up the simulation and event to update locations after each tick
     simulation.nodes(data.devices);
diff --git a/src/webui/service/templates/link/detail.html b/src/webui/service/templates/link/detail.html
index acac4a55392c2bf7f6261707ae1627a486affd10..916abafde05b3ec990346ff7966f207b1dafc10a 100644
--- a/src/webui/service/templates/link/detail.html
+++ b/src/webui/service/templates/link/detail.html
@@ -37,6 +37,7 @@
                         <thead>
                             <tr>
                                 <th scope="col">Endpoint UUID</th>
+                                <th scope="col">Name</th>
                                 <th scope="col">Device</th>
                                 <th scope="col">Endpoint Type</th>
                             </tr>
@@ -44,6 +45,9 @@
                         <tbody>
                               {% for endpoint in link.link_endpoint_ids %}
                               <tr>
+                                   <td>
+                                        {{ endpoint.endpoint_uuid.uuid }}
+                                   </td>
                                    <td>
                                         {{ endpoints_data.get(endpoint.endpoint_uuid.uuid, (endpoint.endpoint_uuid.uuid, ''))[0] }}
                                    </td>
diff --git a/src/webui/service/templates/load_gen/home.html b/src/webui/service/templates/load_gen/home.html
index d58f42601925ca438ab9d9f20b32f94960b5cada..5bedf66fad1fa2d1b5e38e3866acd95347c9559b 100644
--- a/src/webui/service/templates/load_gen/home.html
+++ b/src/webui/service/templates/load_gen/home.html
@@ -53,6 +53,21 @@
             </div>
             <br />
 
+            <div class="row mb-3">
+                {{ form.num_released.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.num_released.errors %}
+                        {{ form.num_released(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.num_released.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.num_released(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
             <div class="row mb-3">
                 <div class="col-sm-2 col-form-label">Service Types:</div>
                 <div class="col-sm-10">
@@ -113,6 +128,67 @@
             </div>
             <br />
 
+            <div class="row mb-3">
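+                {# availability, capacity_gbps and e2e_latency_ms accept list/scalar/range expressions parsed by parse_list_scalar_range #}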
+                {{ form.availability.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.availability.errors %}
+                        {{ form.availability(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.availability.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.availability(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.capacity_gbps.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.capacity_gbps.errors %}
+                        {{ form.capacity_gbps(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.capacity_gbps.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.capacity_gbps(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.e2e_latency_ms.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.e2e_latency_ms.errors %}
+                        {{ form.e2e_latency_ms(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.e2e_latency_ms.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.e2e_latency_ms(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.max_workers.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.max_workers.errors %}
+                        {{ form.max_workers(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.max_workers.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.max_workers(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
             <div class="row mb-3">
                 <div class="col-sm-10">
                     {{ form.do_teardown }} {{ form.do_teardown.label(class="col-sm-3 col-form-label") }}<br/>
diff --git a/src/webui/service/templates/policy/home.html b/src/webui/service/templates/policy/home.html
new file mode 100644
index 0000000000000000000000000000000000000000..081a7f0b5291346633a2f682ba4552b5c1e362fb
--- /dev/null
+++ b/src/webui/service/templates/policy/home.html
@@ -0,0 +1,83 @@
+<!--
+ Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+{% extends 'base.html' %}
+
+{% block content %}
+    <h1>Policy</h1>
+
+    <div class="row">
+        <div class="col">
+            {{ policies | length }} policy rules found
+        </div>
+    </div>
+
+    <table class="table table-striped table-hover">
+        <thead>
+          <tr>
+            <th scope="col">UUID</th>
+            <th scope="col">Kind</th>
+            <th scope="col">Priority</th>
+            <th scope="col">Condition</th>
+            <th scope="col">Operator</th>
+            <th scope="col">Action</th>
+            <th scope="col">Service</th>
+            <th scope="col">Devices</th>
+            <th scope="col">State</th>
+            <th scope="col">Message</th>
+            <th scope="col">Extra</th>
+            <th scope="col"></th>
+          </tr>
+        </thead>
+        <tbody>
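+            {# each PolicyRule is a oneof carrying either a device-scoped or a service-scoped rule #}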
+            {% if policies %}
+                {% for policy in policies %}
+                    {% if policy.WhichOneof('policy_rule') == 'device' %}
+                        <tr>
+                            <td>{{ policy.device.policyRuleBasic.policyRuleId.uuid }}</td>
+                            <td>{{ policy.WhichOneof('policy_rule') }}</td>
+                            <td>{{ policy.device.policyRuleBasic.priority }}</td>
+                            <td>{{ policy.device.policyRuleBasic.conditionList }}</td>
+                            <td>{{ policy.device.policyRuleBasic.booleanOperator }}</td>
+                            <td>{{ policy.device.policyRuleBasic.actionList }}</td>
+                            <td>-</td>
+                            <td>{{ policy.device.deviceList }}</td>
+                            <td>{{ prse.Name(policy.device.policyRuleBasic.policyRuleState.policyRuleState).replace('POLICY_', '') }}</td>
+                            <td>{{ policy.device.policyRuleBasic.policyRuleState.policyRuleStateMessage }}</td>
+                        </tr>
+                    {% elif policy.WhichOneof('policy_rule') == 'service' %}
+                        <tr>
+                            <td>{{ policy.service.policyRuleBasic.policyRuleId.uuid }}</td>
+                            <td>{{ policy.WhichOneof('policy_rule') }}</td>
+                            <td>{{ policy.service.policyRuleBasic.priority }}</td>
+                            <td>{{ policy.service.policyRuleBasic.conditionList }}</td>
+                            <td>{{ policy.service.policyRuleBasic.booleanOperator }}</td>
+                            <td>{{ policy.service.policyRuleBasic.actionList }}</td>
+                            <td>{{ policy.service.serviceId }}</td>
+                            <td>{{ policy.service.deviceList }}</td>
+                            <td>{{ prse.Name(policy.service.policyRuleBasic.policyRuleState.policyRuleState).replace('POLICY_', '') }}</td>
+                            <td>{{ policy.service.policyRuleBasic.policyRuleState.policyRuleStateMessage }}</td>
+                        </tr>
+                    {% else %}
+                        <tr><td colspan="11">Unsupported policy type {{ policy.WhichOneof('policy_rule') }}</td></tr>
+                    {% endif %}
+                {% endfor %}
+            {% else %}
+                <tr><td colspan="11">No policies found</td></tr>
+            {% endif %}
+        </tbody>
+    </table>
+
+{% endblock %}
diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html
index bee2e93c53896a8eeac826703a60afe02a5aa825..414aa19d0165ed7138f277005d5573c9242daefb 100644
--- a/src/webui/service/templates/service/detail.html
+++ b/src/webui/service/templates/service/detail.html
@@ -55,6 +55,7 @@
             <thead>
                 <tr>
                     <th scope="col">Endpoint UUID</th>
+                    <th scope="col">Name</th>
                     <th scope="col">Device</th>
                     <th scope="col">Endpoint Type</th>
                 </tr>
@@ -62,6 +63,9 @@
             <tbody>
                 {% for endpoint in service.service_endpoint_ids %}
                 <tr>
+                    <td>
+                        {{ endpoint.endpoint_uuid.uuid }}
+                    </td>
                     <td>
                         {{ endpoints_data.get(endpoint.endpoint_uuid.uuid, (endpoint.endpoint_uuid.uuid, ''))[0] }}
                     </td>
diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html
index 8f223e44deda37b177a360a51b1e366f680fac27..13b69defeb95f66aba47a4aa78f98631ca8cc367 100644
--- a/src/webui/service/templates/slice/detail.html
+++ b/src/webui/service/templates/slice/detail.html
@@ -55,6 +55,7 @@
             <thead>
                 <tr>
                     <th scope="col">Endpoint UUID</th>
+                    <th scope="col">Name</th>
                     <th scope="col">Device</th>
                     <th scope="col">Endpoint Type</th>
                 </tr>
@@ -62,6 +63,9 @@
             <tbody>
                 {% for endpoint in slice.slice_endpoint_ids %}
                 <tr>
+                    <td>
+                        {{ endpoint.endpoint_uuid.uuid }}
+                    </td>
                     <td>
                         {{ endpoints_data.get(endpoint.endpoint_uuid.uuid, (endpoint.endpoint_uuid.uuid, ''))[0] }}
                     </td>