diff --git a/deploy/all.sh b/deploy/all.sh
index 6f8331b769b6f84a13ac66b48ca2f861a8308ce5..9584dd32d121b7f63e7c7f177bf7bee8c287b4c9 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -147,6 +147,15 @@ export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
 
 
+# ----- K8s Observability ------------------------------------------------------
+
+# If not already set, set the external port on which the Prometheus management HTTP GUI will be exposed.
+export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
+
+# If not already set, set the external port on which the Grafana HTTP dashboards will be exposed.
+export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
+
+
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
@@ -160,6 +169,9 @@ export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
 # Deploy QuestDB
 ./deploy/qdb.sh
 
+# Expose Observability Dashboards (Prometheus and Grafana)
+./deploy/expose_dashboard.sh
+
 # Deploy TeraFlowSDN
 ./deploy/tfs.sh
 
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index 216339117d2156d0ae1beddb5a1d6a7ccbe33219..414de523d10f7d1edb99799e1f5889b340d8ad04 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -167,6 +167,11 @@ function crdb_drop_database_single() {
 }
 
 function crdb_deploy_cluster() {
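+    # The operator manifests deploy into the cockroach-operator-system
+    # namespace, so make sure it exists before applying the CRDs and operator.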
+    echo "CockroachDB Operator Namespace"
+    echo ">>> Create CockroachDB Operator Namespace (if missing)"
+    kubectl apply -f "${CRDB_MANIFESTS_PATH}/pre_operator.yaml"
+    echo
+
     echo "Cockroach Operator CRDs"
     echo ">>> Apply Cockroach Operator CRDs (if they are missing)"
     cp "${CRDB_MANIFESTS_PATH}/crds.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_crds.yaml"
diff --git a/deploy/expose_dashboard.sh b/deploy/expose_dashboard.sh
new file mode 100755
index 0000000000000000000000000000000000000000..60b41c7b75d4f96a22151b1d4d68ba53c75a265c
--- /dev/null
+++ b/deploy/expose_dashboard.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the external port on which the Prometheus management HTTP GUI will be exposed.
+export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
+
+# If not already set, set the external port on which the Grafana HTTP dashboards will be exposed.
+export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
+
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+function expose_dashboard() {
+    echo "Prometheus Port Mapping"
+    echo ">>> Expose Prometheus HTTP Mgmt GUI port (9090->${PROM_EXT_PORT_HTTP})"
+    PROM_PORT_HTTP=$(kubectl --namespace monitoring get service prometheus-k8s -o 'jsonpath={.spec.ports[?(@.name=="web")].port}')
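+    # The MicroK8s ingress addon reads TCP port mappings from the
+    # nginx-ingress-tcp-microk8s-conf ConfigMap: each entry maps an external
+    # port to a namespace/service:port target.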
+    PATCH='{"data": {"'${PROM_EXT_PORT_HTTP}'": "monitoring/prometheus-k8s:'${PROM_PORT_HTTP}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
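+    # Also open the external port on every node: patch the ingress controller
+    # DaemonSet to add a matching containerPort/hostPort pair.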
+    PORT_MAP='{"containerPort": '${PROM_EXT_PORT_HTTP}', "hostPort": '${PROM_EXT_PORT_HTTP}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+
+    echo "Grafana Port Mapping"
+    echo ">>> Expose Grafana HTTP Mgmt GUI port (3000->${GRAF_EXT_PORT_HTTP})"
+    GRAF_PORT_HTTP=$(kubectl --namespace monitoring get service grafana -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+    PATCH='{"data": {"'${GRAF_EXT_PORT_HTTP}'": "monitoring/grafana:'${GRAF_PORT_HTTP}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${GRAF_EXT_PORT_HTTP}', "hostPort": '${GRAF_EXT_PORT_HTTP}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+}
+
+expose_dashboard
diff --git a/deploy/nats.sh b/deploy/nats.sh
index aa082b54ba8806c48f9b5a04c61f110b93b03d6a..b730cec4af66920e5a7d8a2235e63beff70e8694 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -53,7 +53,7 @@ function nats_deploy_single() {
         echo ">>> NATS is present; skipping step."
     else
         echo ">>> Deploy NATS"
-        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image.tag=2.9-alpine
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
 
         echo ">>> Waiting NATS statefulset to be created..."
         while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 4c6dc95d2e20dd92c73692aefd46c6fe4b348601..be83d7f5b2669abe8330adefa8a8feac27a1dab8 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -106,6 +106,15 @@ export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kp
 export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 
 
+# ----- K8s Observability ------------------------------------------------------
+
+# If not already set, set the external port on which the Prometheus management HTTP GUI will be exposed.
+export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
+
+# If not already set, set the external port on which the Grafana HTTP dashboards will be exposed.
+export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
+
+
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
@@ -241,7 +250,8 @@ for COMPONENT in $TFS_COMPONENTS; do
 
     echo "  Adapting '$COMPONENT' manifest file..."
     MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
-    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+    # cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
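+    # Inject the Linkerd sidecar proxy into the manifest, bounding its CPU and
+    # memory so the mesh proxy does not starve the component container.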
+    cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST"
 
     if [ "$COMPONENT" == "pathcomp" ]; then
         IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
@@ -335,7 +345,7 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
     # Exposed through the ingress controller "tfs-ingress"
     GRAFANA_URL="127.0.0.1:${EXT_HTTP_PORT}/grafana"
 
-    # Default Grafana credentials
+    # Default Grafana credentials when installed with the `monitoring` addon
     GRAFANA_USERNAME="admin"
     GRAFANA_PASSWORD="admin"
 
@@ -414,6 +424,20 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
     }' ${GRAFANA_URL_UPDATED}/api/datasources
     printf "\n\n"
 
+    # Add the datasource of the K8s metrics collection framework (Prometheus)
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "prometheus",
+        "name"     : "Prometheus",
+        "url"      : "http://prometheus-k8s.monitoring.svc:9090",
+        "basicAuth": false,
+        "isDefault": false,
+        "jsonData" : {
+            "httpMethod"               : "POST"
+        }
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    printf "\n\n"
+
     echo ">> Creating dashboards..."
     # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
     curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \
diff --git a/manifests/automationservice.yaml b/manifests/automationservice.yaml
deleted file mode 120000
index 5e8d3c1c82db0c03119f29865e2a7edabcdfb0eb..0000000000000000000000000000000000000000
--- a/manifests/automationservice.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../src/automation/target/kubernetes/kubernetes.yml
\ No newline at end of file
diff --git a/manifests/automationservice.yaml b/manifests/automationservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..73e6b1d7be076dbcf55014ae3accbc1e29e0c8e8
--- /dev/null
+++ b/manifests/automationservice.yaml
@@ -0,0 +1,125 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+  labels:
+    app.kubernetes.io/name: automationservice
+    app: automationservice
+  name: automationservice
+spec:
+  ports:
+    - name: grpc
+      port: 5050
+      targetPort: 5050
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 8080
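+      # Quarkus serves metrics on container port 8080 (/q/metrics); 9192 is
+      # the metrics port used by the rest of the TFS services.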
+  selector:
+    app.kubernetes.io/name: automationservice
+  type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+  labels:
+    app: automationservice
+    app.kubernetes.io/name: automationservice
+  name: automationservice
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: automationservice
+  template:
+    metadata:
+      annotations:
+        app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+      labels:
+        app: automationservice
+        app.kubernetes.io/name: automationservice
+    spec:
+      containers:
+        - env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: CONTEXT_SERVICE_HOST
+              value: contextservice
+            - name: DEVICE_SERVICE_HOST
+              value: deviceservice
+          image: labs.etsi.org:5050/tfs/controller/automation:0.2.0
+          imagePullPolicy: Always
+          livenessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/live
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          name: automationservice
+          ports:
+            - containerPort: 5050
+              name: grpc
+              protocol: TCP
+            - containerPort: 8080
+              name: metrics
+              protocol: TCP
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/ready
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          resources:
+            requests:
+              cpu: 50m
+              memory: 512Mi
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: automationservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: automationservice
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
diff --git a/manifests/cockroachdb/pre_operator.yaml b/manifests/cockroachdb/pre_operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16718a77918491170502a5cbb864a6fda39c734a
--- /dev/null
+++ b/manifests/cockroachdb/pre_operator.yaml
@@ -0,0 +1,19 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: cockroach-operator
+  name: cockroach-operator-system
diff --git a/manifests/computeservice.yaml b/manifests/computeservice.yaml
index 378e34b9b4345a3a65f848dbd7a3b3e3753b8a05..3e3b041ab20968ad0010eb06f7900faa7b649dc9 100644
--- a/manifests/computeservice.yaml
+++ b/manifests/computeservice.yaml
@@ -34,6 +34,7 @@ spec:
         ports:
         - containerPort: 8080
         - containerPort: 9090
+        - containerPort: 9192
         env:
         - name: LOG_LEVEL
           value: "INFO"
@@ -70,3 +71,7 @@ spec:
     protocol: TCP
     port: 9090
     targetPort: 9090
+  - name: metrics
+    protocol: TCP
+    port: 9192
+    targetPort: 9192
diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml
index b1e6eb89dc4ec92409dbd05bbe668987ea93828f..96735bf5f89f682f31131c123ee9884a1becbfdb 100644
--- a/manifests/contextservice.yaml
+++ b/manifests/contextservice.yaml
@@ -20,9 +20,11 @@ spec:
   selector:
     matchLabels:
       app: contextservice
-  replicas: 1
+  #replicas: 1
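+  # replica count is now managed by the HorizontalPodAutoscaler defined below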
   template:
     metadata:
+      annotations:
+        config.linkerd.io/skip-outbound-ports: "4222"
       labels:
         app: contextservice
     spec:
@@ -52,11 +54,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:1010"]
         resources:
           requests:
-            cpu: 50m
-            memory: 64Mi
+            cpu: 250m
+            memory: 128Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 1000m
+            memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
@@ -77,3 +79,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: contextservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: contextservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
index ca2c81f0f2e5d874066464ab0537adeec734cfbb..ad54f4b6c2682c381c1c5238a013e1d12e177764 100644
--- a/manifests/deviceservice.yaml
+++ b/manifests/deviceservice.yaml
@@ -45,11 +45,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:2020"]
         resources:
           requests:
-            cpu: 50m
+            cpu: 128m
             memory: 64Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 256m
+            memory: 128Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/load_generatorservice.yaml b/manifests/load_generatorservice.yaml
index b94e11e725757fa2ec67de19f98ecfa6a03f085b..3f65c2c857a39f2b7a5ebeaccd9ddfd4916f2487 100644
--- a/manifests/load_generatorservice.yaml
+++ b/manifests/load_generatorservice.yaml
@@ -44,11 +44,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:50052"]
         resources:
           requests:
-            cpu: 50m
+            cpu: 256m
             memory: 64Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 512m
+            memory: 128Mi
 ---
 apiVersion: v1
 kind: Service
diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml
index fd3599f429f48ebb3cf3f8d802f8f61f00e1b41d..3ba12750b20a7093a570748e67a93922316a66f6 100644
--- a/manifests/pathcompservice.yaml
+++ b/manifests/pathcompservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: pathcompservice
-  replicas: 1
+  #replicas: 1
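+  # replica count is now managed by the HorizontalPodAutoscaler defined below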
   template:
     metadata:
       labels:
@@ -53,6 +53,8 @@ spec:
       - name: backend
         image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest
         imagePullPolicy: Always
+        ports:
+        - containerPort: 8081
         #readinessProbe:
         #  httpGet:
         #    path: /health
@@ -96,3 +98,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: pathcompservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: pathcompservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/policyservice.yaml b/manifests/policyservice.yaml
deleted file mode 120000
index bb28f6e2cff4c6b50e44f049dec6a53d31922e86..0000000000000000000000000000000000000000
--- a/manifests/policyservice.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../src/policy/target/kubernetes/kubernetes.yml
\ No newline at end of file
diff --git a/manifests/policyservice.yaml b/manifests/policyservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..72da09ecaf1de9d080d686c63c0f18c88f09e8b4
--- /dev/null
+++ b/manifests/policyservice.yaml
@@ -0,0 +1,129 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+    app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
+  labels:
+    app.kubernetes.io/name: policyservice
+    app: policyservice
+  name: policyservice
+spec:
+  ports:
+    - name: metrics
+      port: 9192
+      targetPort: 8080
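+      # Quarkus serves metrics on container port 8080 (/q/metrics), exposed
+      # cluster-internally as port 9192.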
+    - name: grpc
+      port: 6060
+      targetPort: 6060
+  selector:
+    app.kubernetes.io/name: policyservice
+  type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+    app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
+  labels:
+    app: policyservice
+    app.kubernetes.io/name: policyservice
+  name: policyservice
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: policyservice
+  template:
+    metadata:
+      annotations:
+        app.quarkus.io/commit-id: 8065cee75be759e14af792737179537096de5e11
+        app.quarkus.io/build-timestamp: 2023-03-30 - 13:49:59 +0000
+      labels:
+        app: policyservice
+        app.kubernetes.io/name: policyservice
+    spec:
+      containers:
+        - env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: SERVICE_SERVICE_HOST
+              value: serviceservice
+            - name: CONTEXT_SERVICE_HOST
+              value: contextservice
+            - name: MONITORING_SERVICE_HOST
+              value: monitoringservice
+          image: labs.etsi.org:5050/tfs/controller/policy:0.1.0
+          imagePullPolicy: Always
+          livenessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/live
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          name: policyservice
+          ports:
+            - containerPort: 8080
+              name: metrics
+              protocol: TCP
+            - containerPort: 6060
+              name: grpc-server
+              protocol: TCP
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /q/health/ready
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 2
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 10
+          resources:
+            requests:
+              cpu: 50m
+              memory: 512Mi
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: policyservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: policyservice
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
\ No newline at end of file
diff --git a/manifests/servicemonitors.yaml b/manifests/servicemonitors.yaml
index 06c3390f4fddbcb6f8adec5d931989cc8a41cc68..f5da08182a4665b21607987ea97d9bf3cc5b7e21 100644
--- a/manifests/servicemonitors.yaml
+++ b/manifests/servicemonitors.yaml
@@ -243,3 +243,90 @@ spec:
     any: false
     matchNames:
     - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-policyservice-metric
+  labels:
+    app: policyservice
+    #release: prometheus
+    #release: prom  # name of the Prometheus release
+    # ( VERY IMPORTANT: you need to know the correct release name by viewing
+    #   the ServiceMonitor of Prometheus itself; without the correct name,
+    #   Prometheus cannot identify the metrics of the target app.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: policyservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /q/metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-automationservice-metric
+  labels:
+    app: automationservice
+    #release: prometheus
+    #release: prom  # name of the Prometheus release
+    # ( VERY IMPORTANT: you need to know the correct release name by viewing
+    #   the ServiceMonitor of Prometheus itself; without the correct name,
+    #   Prometheus cannot identify the metrics of the target app.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: automationservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /q/metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-computeservice-metric
+  labels:
+    app: computeservice
+    #release: prometheus
+    #release: prom  # name of the Prometheus release
+    # ( VERY IMPORTANT: you need to know the correct release name by viewing
+    #   the ServiceMonitor of Prometheus itself; without the correct name,
+    #   Prometheus cannot identify the metrics of the target app.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: computeservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml
index 3fa4a6e0dc256ba964fd4ee26a8b7095bb2303f4..ce90aa18854522f1c08e213cb554c70af70bac36 100644
--- a/manifests/serviceservice.yaml
+++ b/manifests/serviceservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: serviceservice
-  replicas: 1
+  #replicas: 1
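+  # replica count is now managed by the HorizontalPodAutoscaler defined below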
   template:
     metadata:
       labels:
@@ -45,11 +45,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:3030"]
         resources:
           requests:
-            cpu: 50m
-            memory: 64Mi
+            cpu: 32m
+            memory: 32Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 128m
+            memory: 64Mi
 ---
 apiVersion: v1
 kind: Service
@@ -70,3 +70,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: serviceservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: serviceservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index 49e2b5943d20586941f80e8fc4b5c32c99d70f8e..8f312e8e0c89c5b8ed1923622078ea16b6bd876e 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: sliceservice
-  replicas: 1
+  #replicas: 1
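+  # replica count is now managed by the HorizontalPodAutoscaler defined below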
   template:
     metadata:
       labels:
@@ -50,11 +50,11 @@ spec:
             command: ["/bin/grpc_health_probe", "-addr=:4040"]
         resources:
           requests:
-            cpu: 50m
-            memory: 64Mi
+            cpu: 32m
+            memory: 128Mi
           limits:
-            cpu: 500m
-            memory: 512Mi
+            cpu: 128m
+            memory: 256Mi
 ---
 apiVersion: v1
 kind: Service
@@ -75,3 +75,25 @@ spec:
     protocol: TCP
     port: 9192
     targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: sliceservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: sliceservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index 234075f738abd880a7c269cb07b72ad6c635d4c6..b6ddfc0a91ae5316969079c517e148f63fb18b61 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -62,7 +62,7 @@ spec:
             cpu: 500m
             memory: 512Mi
       - name: grafana
-        image: grafana/grafana:8.5.11
+        image: grafana/grafana:8.5.22
         imagePullPolicy: IfNotPresent
         ports:
           - containerPort: 3000
diff --git a/my_deploy.sh b/my_deploy.sh
index 22a7ae8155135f8d81f2fa12d71f80d8dd7c57e9..d6f3513e9b2090905b7814c4563644ecda7bd2c6 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -29,7 +29,7 @@ export TFS_IMAGE_TAG="dev"
 export TFS_K8S_NAMESPACE="tfs"
 
 # Set additional manifest files to be applied after the deployment
-export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml manifests/servicemonitors.yaml"
 
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
@@ -115,3 +115,12 @@ export QDB_DROP_TABLES_IF_EXIST=""
 
 # Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port on which the Prometheus management HTTP GUI will be exposed.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port on which the Grafana HTTP dashboards will be exposed.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/proto/context.proto b/proto/context.proto
index 2dfbb7805eb444ee94e27bb00ca05d9a1c83b8ec..3b25e6361766ee4c2b52e15aab215409f40cbb56 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -40,7 +40,7 @@ service ContextService {
   rpc SetDevice          (Device        ) returns (       DeviceId        ) {}
   rpc RemoveDevice       (DeviceId      ) returns (       Empty           ) {}
   rpc GetDeviceEvents    (Empty         ) returns (stream DeviceEvent     ) {}
-
+  rpc SelectDevice       (DeviceFilter  ) returns (       DeviceList      ) {}
   rpc ListEndPointNames  (EndPointIdList) returns (       EndPointNameList) {}
 
   rpc ListLinkIds        (Empty         ) returns (       LinkIdList      ) {}
@@ -57,6 +57,7 @@ service ContextService {
   rpc UnsetService       (Service       ) returns (       ServiceId       ) {}
   rpc RemoveService      (ServiceId     ) returns (       Empty           ) {}
   rpc GetServiceEvents   (Empty         ) returns (stream ServiceEvent    ) {}
+  rpc SelectService      (ServiceFilter ) returns (       ServiceList     ) {}
 
   rpc ListSliceIds       (ContextId     ) returns (       SliceIdList     ) {}
   rpc ListSlices         (ContextId     ) returns (       SliceList       ) {}
@@ -65,6 +66,7 @@ service ContextService {
   rpc UnsetSlice         (Slice         ) returns (       SliceId         ) {}
   rpc RemoveSlice        (SliceId       ) returns (       Empty           ) {}
   rpc GetSliceEvents     (Empty         ) returns (stream SliceEvent      ) {}
+  rpc SelectSlice        (SliceFilter   ) returns (       SliceList       ) {}
 
   rpc ListConnectionIds  (ServiceId     ) returns (       ConnectionIdList) {}
   rpc ListConnections    (ServiceId     ) returns (       ConnectionList  ) {}
@@ -208,6 +210,13 @@ message DeviceList {
   repeated Device devices = 1;
 }
 
+message DeviceFilter {
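+  // Select the given devices; the include_* flags control which heavy
+  // sub-objects (endpoints, config rules, components) are populated.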
+  DeviceIdList device_ids = 1;
+  bool include_endpoints = 2;
+  bool include_config_rules = 3;
+  bool include_components = 4;
+}
+
 message DeviceEvent {
   Event event = 1;
   DeviceId device_id = 2;
@@ -288,6 +297,13 @@ message ServiceList {
   repeated Service services = 1;
 }
 
+message ServiceFilter {
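+  // Same pattern as DeviceFilter, applied to services.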
+  ServiceIdList service_ids = 1;
+  bool include_endpoint_ids = 2;
+  bool include_constraints = 3;
+  bool include_config_rules = 4;
+}
+
 message ServiceEvent {
   Event event = 1;
   ServiceId service_id = 2;
@@ -342,6 +358,15 @@ message SliceList {
   repeated Slice slices = 1;
 }
 
+message SliceFilter {
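+  // Same pattern as DeviceFilter, applied to slices.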
+  SliceIdList slice_ids = 1;
+  bool include_endpoint_ids = 2;
+  bool include_constraints = 3;
+  bool include_service_ids = 4;
+  bool include_subslice_ids = 5;
+  bool include_config_rules = 6;
+}
+
 message SliceEvent {
   Event event = 1;
   SliceId slice_id = 2;
diff --git a/scripts/old/open_dashboard.sh b/scripts/old/open_dashboard.sh
old mode 100755
new mode 100644
index 4ea206f4538c27fe8563ce5c30ed837781f8d362..2ff15684a499fe390816ebb8e4859cad49d43d32
--- a/scripts/old/open_dashboard.sh
+++ b/scripts/old/open_dashboard.sh
@@ -16,9 +16,7 @@
 
 # this script opens the dashboard
 
-K8S_NAMESPACE=${K8S_NAMESPACE:-'tfs'}
-
-GRAFANA_IP=$(kubectl get service/webuiservice -n ${TFS_K8S_NAMESPACE} -o jsonpath='{.spec.clusterIP}')
+GRAFANA_IP=$(kubectl get service/grafana -n monitoring -o jsonpath='{.spec.clusterIP}')
 GRAFANA_PORT=3000 #$(kubectl get service webuiservice --namespace $TFS_K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==3000)].nodePort}')
 URL=http://${GRAFANA_IP}:${GRAFANA_PORT}
 
diff --git a/src/automation/pom.xml b/src/automation/pom.xml
index 2fd5fd263a698145f39c37ed358982de58dfee77..7dfc3dac438fa5df740381be0ef595a5734d7699 100644
--- a/src/automation/pom.xml
+++ b/src/automation/pom.xml
@@ -174,6 +174,11 @@
       <scope>test</scope>
     </dependency>
 
+    <dependency>
+      <groupId>io.quarkus</groupId>
+      <artifactId>quarkus-smallrye-metrics</artifactId>
+    </dependency>
+
   </dependencies>
 
   <build>
diff --git a/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java b/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java
index 51857bb3dba6422fe6ffc93930e0e2bf65b1a223..2f9054cd8296579b3e391aae84ec16ad1f460bdb 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java
@@ -27,6 +27,10 @@ import io.quarkus.grpc.GrpcService;
 import io.smallrye.mutiny.Uni;
 import javax.inject.Inject;
 
+import org.eclipse.microprofile.metrics.MetricUnits;
+import org.eclipse.microprofile.metrics.annotation.Counted;
+import org.eclipse.microprofile.metrics.annotation.Timed;
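+// SmallRye Metrics annotations; Quarkus exposes the resulting counters and
+// timers at /q/metrics for Prometheus to scrape.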
+
 @GrpcService
 public class AutomationGatewayImpl implements AutomationGateway {
 
@@ -40,18 +44,24 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpGetDeviceRole_counter")
+    @Timed(name = "automation_ztpGetDeviceRole_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRole> ztpGetDeviceRole(Automation.DeviceRoleId request) {
         return Uni.createFrom()
                 .item(() -> Automation.DeviceRole.newBuilder().setDevRoleId(request).build());
     }
 
     @Override
+    @Counted(name = "automation_ztpGetDeviceRolesByDeviceId_counter")
+    @Timed(name = "automation_ztpGetDeviceRolesByDeviceId_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRoleList> ztpGetDeviceRolesByDeviceId(
             ContextOuterClass.DeviceId request) {
         return Uni.createFrom().item(() -> Automation.DeviceRoleList.newBuilder().build());
     }
 
     @Override
+    @Counted(name = "automation_ztpAdd_counter")
+    @Timed(name = "automation_ztpAdd_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRoleState> ztpAdd(Automation.DeviceRole request) {
         final var devRoleId = request.getDevRoleId().getDevRoleId().getUuid();
         final var deviceId = serializer.deserialize(request.getDevRoleId().getDevId());
@@ -63,6 +73,8 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpUpdate_counter")
+    @Timed(name = "automation_ztpUpdate_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<DeviceRoleState> ztpUpdate(DeviceRoleConfig request) {
         final var devRoleId = request.getDevRole().getDevRoleId().getDevRoleId().getUuid();
         final var deviceId = serializer.deserialize(request.getDevRole().getDevRoleId().getDevId());
@@ -75,6 +87,8 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpDelete_counter")
+    @Timed(name = "automation_ztpDelete_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceRoleState> ztpDelete(Automation.DeviceRole request) {
         final var devRoleId = request.getDevRoleId().getDevRoleId().getUuid();
         return automationService
@@ -84,6 +98,8 @@ public class AutomationGatewayImpl implements AutomationGateway {
     }
 
     @Override
+    @Counted(name = "automation_ztpDeleteAll_counter")
+    @Timed(name = "automation_ztpDeleteAll_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<Automation.DeviceDeletionResult> ztpDeleteAll(ContextOuterClass.Empty empty) {
         return Uni.createFrom().item(() -> Automation.DeviceDeletionResult.newBuilder().build());
     }
diff --git a/src/automation/src/main/resources/application.yml b/src/automation/src/main/resources/application.yml
index f7b767e98f55556d21910b649fdfda2d9a8f94ec..bf638039daf3460c2f4ef374a380b37d01de1f1c 100644
--- a/src/automation/src/main/resources/application.yml
+++ b/src/automation/src/main/resources/application.yml
@@ -17,7 +17,7 @@ automation:
 quarkus:
   banner:
     path: teraflow-automation-banner.txt
-  grpc:
+  grpc: 
     server:
       port: 5050
       enable-reflection-service: true
@@ -36,6 +36,7 @@ quarkus:
     group: tfs
     name: controller/automation
     registry: labs.etsi.org:5050
+    tag: 0.2.0
 
   kubernetes:
     name: automationservice
@@ -51,12 +52,16 @@ quarkus:
       period: 10s
     ports:
       http:
-        host-port: 8080
+        host-port: 9192
         container-port: 8080
-      grpc:
-        host-port: 5050
-        container-port: 5050
     env:
       vars:
         context-service-host: "contextservice"
         device-service-host: "deviceservice"
+    resources:
+      requests:
+        cpu: 50m
+        memory: 512Mi
+      limits:
+        cpu: 500m
+        memory: 2048Mi
diff --git a/src/automation/target/kubernetes/kubernetes.yml b/src/automation/target/kubernetes/kubernetes.yml
index 4dacf3998c3991a441dc374ca6c6abc29e8d3b80..7aa68a257eeda04d6101f05b291882c274c43f86 100644
--- a/src/automation/target/kubernetes/kubernetes.yml
+++ b/src/automation/target/kubernetes/kubernetes.yml
@@ -4,32 +4,36 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 ---
 apiVersion: v1
 kind: Service
 metadata:
   annotations:
-    app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+    app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+    app.quarkus.io/build-timestamp: 2023-04-04 - 11:47:48 +0000
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /q/metrics
+    prometheus.io/port: "8080"
+    prometheus.io/scheme: http
   labels:
     app.kubernetes.io/name: automationservice
     app: automationservice
   name: automationservice
 spec:
   ports:
-    - name: grpc
-      port: 5050
-      targetPort: 5050
     - name: http
-      port: 8080
+      port: 9192
       targetPort: 8080
+    - name: grpc-server
+      port: 5050
+      targetPort: 5050
   selector:
     app.kubernetes.io/name: automationservice
   type: ClusterIP
@@ -38,7 +42,12 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   annotations:
-    app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+    app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+    app.quarkus.io/build-timestamp: 2023-04-04 - 11:47:48 +0000
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /q/metrics
+    prometheus.io/port: "8080"
+    prometheus.io/scheme: http
   labels:
     app: automationservice
     app.kubernetes.io/name: automationservice
@@ -51,7 +60,12 @@ spec:
   template:
     metadata:
       annotations:
-        app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
+        app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+        app.quarkus.io/build-timestamp: 2023-04-04 - 11:47:48 +0000
+        prometheus.io/scrape: "true"
+        prometheus.io/path: /q/metrics
+        prometheus.io/port: "8080"
+        prometheus.io/scheme: http
       labels:
         app: automationservice
         app.kubernetes.io/name: automationservice
@@ -80,12 +94,12 @@ spec:
             timeoutSeconds: 10
           name: automationservice
           ports:
-            - containerPort: 5050
-              name: grpc
-              protocol: TCP
             - containerPort: 8080
               name: http
               protocol: TCP
+            - containerPort: 5050
+              name: grpc-server
+              protocol: TCP
           readinessProbe:
             failureThreshold: 3
             httpGet:
@@ -96,3 +110,10 @@ spec:
             periodSeconds: 10
             successThreshold: 1
             timeoutSeconds: 10
+          resources:
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+            requests:
+              cpu: 50m
+              memory: 512Mi
diff --git a/src/common/message_broker/backend/nats/NatsBackendThread.py b/src/common/message_broker/backend/nats/NatsBackendThread.py
index e59e4d6835ef662e4b0ed9f92d79a45c22954a6f..0bedd2b242f7eeaa1585d0eb41c5a0bd9efe07e5 100644
--- a/src/common/message_broker/backend/nats/NatsBackendThread.py
+++ b/src/common/message_broker/backend/nats/NatsBackendThread.py
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import asyncio, nats, nats.errors, queue, threading
+import asyncio, logging, nats, nats.errors, queue, threading
 from typing import List
 from common.message_broker.Message import Message
 
+LOGGER = logging.getLogger(__name__)
+
 class NatsBackendThread(threading.Thread):
     def __init__(self, nats_uri : str) -> None:
         self._nats_uri = nats_uri
@@ -32,7 +34,9 @@ class NatsBackendThread(threading.Thread):
         self._tasks_terminated.set()
 
     async def _run_publisher(self) -> None:
+        LOGGER.info('[_run_publisher] NATS URI: {:s}'.format(str(self._nats_uri)))
         client = await nats.connect(servers=[self._nats_uri])
+        LOGGER.info('[_run_publisher] Connected!')
         while not self._terminate.is_set():
             try:
                 message : Message = await self._publish_queue.get()
@@ -47,8 +51,11 @@ class NatsBackendThread(threading.Thread):
     async def _run_subscriber(
         self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event
     ) -> None:
+        LOGGER.info('[_run_subscriber] NATS URI: {:s}'.format(str(self._nats_uri)))
         client = await nats.connect(servers=[self._nats_uri])
+        LOGGER.info('[_run_subscriber] Connected!')
         subscription = await client.subscribe(topic_name)
+        LOGGER.info('[_run_subscriber] Subscribed!')
         while not self._terminate.is_set() and not unsubscribe.is_set():
             try:
                 message = await subscription.next_msg(timeout)
diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py
index 0e1d8c7371e87b47bfc47a4242e00039add48e7f..1e238510c98b83bebde8167711b988d7476e5a99 100644
--- a/src/common/tools/descriptor/Loader.py
+++ b/src/common/tools/descriptor/Loader.py
@@ -222,13 +222,13 @@ class DescriptorLoader:
         self.__topologies_add = get_descriptors_add_topologies(self.__topologies)
 
         if self.__dummy_mode:
-            self._dummy_mode()
+            self._load_dummy_mode()
         else:
-            self._normal_mode()
+            self._load_normal_mode()
         
         return self.__results
 
-    def _dummy_mode(self) -> None:
+    def _load_dummy_mode(self) -> None:
         # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
         self.__ctx_cli.connect()
         self._process_descr('context',    'add',    self.__ctx_cli.SetContext,    Context,    self.__contexts_add  )
@@ -242,7 +242,7 @@ class DescriptorLoader:
         self._process_descr('topology',   'update', self.__ctx_cli.SetTopology,   Topology,   self.__topologies    )
         #self.__ctx_cli.close()
 
-    def _normal_mode(self) -> None:
+    def _load_normal_mode(self) -> None:
         # Normal mode: follows the automated workflows in the different components
         assert len(self.__connections) == 0, 'in normal mode, connections should not be set'
 
@@ -321,7 +321,35 @@ class DescriptorLoader:
             response = self.__ctx_cli.ListSlices(ContextId(**json_context_id(context_uuid)))
             assert len(response.slices) == num_slices
 
-    def unload(self) -> None:
+    def _unload_dummy_mode(self) -> None:
+        # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
+        self.__ctx_cli.connect()
+
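+        # Remove entities in reverse dependency order:
+        # slices -> services -> links -> devices -> topologies -> contexts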
+        for _, slice_list in self.slices.items():
+            for slice_ in slice_list:
+                self.__ctx_cli.RemoveSlice(SliceId(**slice_['slice_id']))
+
+        for _, service_list in self.services.items():
+            for service in service_list:
+                self.__ctx_cli.RemoveService(ServiceId(**service['service_id']))
+
+        for link in self.links:
+            self.__ctx_cli.RemoveLink(LinkId(**link['link_id']))
+
+        for device in self.devices:
+            self.__ctx_cli.RemoveDevice(DeviceId(**device['device_id']))
+
+        for _, topology_list in self.topologies.items():
+            for topology in topology_list:
+                self.__ctx_cli.RemoveTopology(TopologyId(**topology['topology_id']))
+
+        for context in self.contexts:
+            self.__ctx_cli.RemoveContext(ContextId(**context['context_id']))
+
+        #self.__ctx_cli.close()
+
+    def _unload_normal_mode(self) -> None:
+        # Normal mode: follows the automated workflows in the different components
         self.__ctx_cli.connect()
         self.__dev_cli.connect()
         self.__svc_cli.connect()
@@ -348,6 +376,17 @@ class DescriptorLoader:
         for context in self.contexts:
             self.__ctx_cli.RemoveContext(ContextId(**context['context_id']))
 
+        #self.__ctx_cli.close()
+        #self.__dev_cli.close()
+        #self.__svc_cli.close()
+        #self.__slc_cli.close()
+
+    def unload(self) -> None:
+        if self.__dummy_mode:
+            self._unload_dummy_mode()
+        else:
+            self._unload_normal_mode()
+
 def compose_notifications(results : TypeResults) -> TypeNotificationList:
     notifications = []
     for entity_name, action_name, num_ok, error_list in results:
diff --git a/src/context/client/ContextClient.py b/src/context/client/ContextClient.py
index 7c3832d6b3ea7de0a495faee143b73179e8da5b9..13d9dc0035b45845bf11367e02c8830b5151c1d6 100644
--- a/src/context/client/ContextClient.py
+++ b/src/context/client/ContextClient.py
@@ -21,11 +21,11 @@ from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.proto.context_pb2 import (
     Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
     Context, ContextEvent, ContextId, ContextIdList, ContextList,
-    Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
+    Device, DeviceEvent, DeviceFilter, DeviceId, DeviceIdList, DeviceList,
     Empty, EndPointIdList, EndPointNameList,
     Link, LinkEvent, LinkId, LinkIdList, LinkList,
-    Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
-    Slice, SliceEvent, SliceId, SliceIdList, SliceList,
+    Service, ServiceEvent, ServiceFilter, ServiceId, ServiceIdList, ServiceList,
+    Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
     Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
 from common.proto.context_pb2_grpc import ContextServiceStub
 from common.proto.context_policy_pb2_grpc import ContextPolicyServiceStub
@@ -185,6 +185,13 @@ class ContextClient:
         LOGGER.debug('RemoveDevice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def SelectDevice(self, request: DeviceFilter) -> DeviceList:
+        LOGGER.debug('SelectDevice request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectDevice(request)
+        LOGGER.debug('SelectDevice result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def GetDeviceEvents(self, request: Empty) -> Iterator[DeviceEvent]:
         LOGGER.debug('GetDeviceEvents request: {:s}'.format(grpc_message_to_json_string(request)))
@@ -283,6 +290,13 @@ class ContextClient:
         LOGGER.debug('RemoveService result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def SelectService(self, request: ServiceFilter) -> ServiceList:
+        LOGGER.debug('SelectService request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectService(request)
+        LOGGER.debug('SelectService result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def GetServiceEvents(self, request: Empty) -> Iterator[ServiceEvent]:
         LOGGER.debug('GetServiceEvents request: {:s}'.format(grpc_message_to_json_string(request)))
@@ -332,6 +346,13 @@ class ContextClient:
         LOGGER.debug('RemoveSlice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def SelectSlice(self, request: SliceFilter) -> SliceList:
+        LOGGER.debug('SelectSlice request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectSlice(request)
+        LOGGER.debug('SelectSlice result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def GetSliceEvents(self, request: Empty) -> Iterator[SliceEvent]:
         LOGGER.debug('GetSliceEvents request: {:s}'.format(grpc_message_to_json_string(request)))
diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py
index 6fe00f917cf8b338f0934e2a268fa757d2055865..789ee7a78c6bcff3e62a6dd373bd58dbb2e7a960 100644
--- a/src/context/service/ContextServiceServicerImpl.py
+++ b/src/context/service/ContextServiceServicerImpl.py
@@ -18,11 +18,11 @@ from common.message_broker.MessageBroker import MessageBroker
 from common.proto.context_pb2 import (
     Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
     Context, ContextEvent, ContextId, ContextIdList, ContextList,
-    Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
+    Device, DeviceEvent, DeviceFilter, DeviceId, DeviceIdList, DeviceList,
     Empty, EndPointIdList, EndPointNameList, EventTypeEnum,
     Link, LinkEvent, LinkId, LinkIdList, LinkList,
-    Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
-    Slice, SliceEvent, SliceId, SliceIdList, SliceList,
+    Service, ServiceEvent, ServiceFilter, ServiceId, ServiceIdList, ServiceList,
+    Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
     Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
 from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule
 from common.proto.context_pb2_grpc import ContextServiceServicer
@@ -31,13 +31,13 @@ from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_m
 from .database.Connection import (
     connection_delete, connection_get, connection_list_ids, connection_list_objs, connection_set)
 from .database.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set
-from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_set
+from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_select, device_set
 from .database.EndPoint import endpoint_list_names
 from .database.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set
 from .database.PolicyRule import (
     policyrule_delete, policyrule_get, policyrule_list_ids, policyrule_list_objs, policyrule_set)
-from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set
-from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_set, slice_unset
+from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_select, service_set
+from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_select, slice_set, slice_unset
 from .database.Topology import (
     topology_delete, topology_get, topology_get_details, topology_list_ids, topology_list_objs, topology_set)
 from .Events import (
@@ -161,6 +161,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
             notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id})
         return Empty()
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectDevice(self, request : DeviceFilter, context : grpc.ServicerContext) -> DeviceList:
+        return DeviceList(devices=device_select(self.db_engine, request))
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetDeviceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
         for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT):
@@ -235,6 +239,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
             notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id})
         return Empty()
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectService(self, request : ServiceFilter, context : grpc.ServicerContext) -> ServiceList:
+        return ServiceList(services=service_select(self.db_engine, request))
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
         for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT):
@@ -278,6 +286,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
             notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id})
         return Empty()
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectSlice(self, request : SliceFilter, context : grpc.ServicerContext) -> SliceList:
+        return SliceList(slices=slice_select(self.db_engine, request))
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetSliceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]:
         for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT):
diff --git a/src/context/service/database/Connection.py b/src/context/service/database/Connection.py
index a3edb8ea2838d9203a810677da495893a2cd6973..80d3b3a6d437986741ee5308205d8a902e897c40 100644
--- a/src/context/service/database/Connection.py
+++ b/src/context/service/database/Connection.py
@@ -16,7 +16,7 @@ import datetime, logging, re
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
 from sqlalchemy.exc import IntegrityError
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
 from common.proto.context_pb2 import Connection, ConnectionId, ServiceId
@@ -40,7 +40,11 @@ def connection_list_ids(db_engine : Engine, request : ServiceId) -> List[Dict]:
 def connection_list_objs(db_engine : Engine, request : ServiceId) -> List[Dict]:
     _,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[ConnectionModel] = session.query(ConnectionModel).filter_by(service_uuid=service_uuid).all()
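+        # Eagerly load the relationships dump() traverses; selectinload() emits one extra
+        # "SELECT ... WHERE ... IN (...)" per relationship instead of lazy-loading row by row.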
+        obj_list : List[ConnectionModel] = session.query(ConnectionModel)\
+            .options(selectinload(ConnectionModel.connection_service))\
+            .options(selectinload(ConnectionModel.connection_endpoints))\
+            .options(selectinload(ConnectionModel.connection_subservices))\
+            .filter_by(service_uuid=service_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
@@ -48,6 +52,9 @@ def connection_get(db_engine : Engine, request : ConnectionId) -> Dict:
     connection_uuid = connection_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[ConnectionModel] = session.query(ConnectionModel)\
+            .options(selectinload(ConnectionModel.connection_service))\
+            .options(selectinload(ConnectionModel.connection_endpoints))\
+            .options(selectinload(ConnectionModel.connection_subservices))\
             .filter_by(connection_uuid=connection_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Context.py b/src/context/service/database/Context.py
index 9e05e54b38d3772ece2d87de0d98fb5a216088de..4654095034749e1de985705b242ba9fa05a82f6a 100644
--- a/src/context/service/database/Context.py
+++ b/src/context/service/database/Context.py
@@ -15,7 +15,7 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
 from common.proto.context_pb2 import Context, ContextId
@@ -34,14 +34,22 @@ def context_list_ids(db_engine : Engine) -> List[Dict]:
 
 def context_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[ContextModel] = session.query(ContextModel).all()
+        obj_list : List[ContextModel] = session.query(ContextModel)\
+            .options(selectinload(ContextModel.topologies))\
+            .options(selectinload(ContextModel.services))\
+            .options(selectinload(ContextModel.slices))\
+            .all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def context_get(db_engine : Engine, request : ContextId) -> Dict:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[ContextModel] = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none()
+        obj : Optional[ContextModel] = session.query(ContextModel)\
+            .options(selectinload(ContextModel.topologies))\
+            .options(selectinload(ContextModel.services))\
+            .options(selectinload(ContextModel.slices))\
+            .filter_by(context_uuid=context_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py
index c5a19c9c4b0bca4f85ffe1211dbefc6b218d518e..3e106bc158ab804c7eada7284e9d1b883eb66264 100644
--- a/src/context/service/database/Device.py
+++ b/src/context/service/database/Device.py
@@ -15,12 +15,12 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
-from common.proto.context_pb2 import Device, DeviceId, TopologyId
-from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.proto.context_pb2 import Device, DeviceFilter, DeviceId, TopologyId
+#from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Device import json_device_id
 from context.service.database.uuids.Topology import topology_get_uuid
 from .models.DeviceModel import DeviceModel
@@ -43,14 +43,22 @@ def device_list_ids(db_engine : Engine) -> List[Dict]:
 
 def device_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[DeviceModel] = session.query(DeviceModel).all()
+        obj_list : List[DeviceModel] = session.query(DeviceModel)\
+            .options(selectinload(DeviceModel.endpoints))\
+            .options(selectinload(DeviceModel.config_rules))\
+            .all()
+            #.options(selectinload(DeviceModel.components))\
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def device_get(db_engine : Engine, request : DeviceId) -> Dict:
     device_uuid = device_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[DeviceModel] = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none()
+        obj : Optional[DeviceModel] = session.query(DeviceModel)\
+            .options(selectinload(DeviceModel.endpoints))\
+            .options(selectinload(DeviceModel.config_rules))\
+            .filter_by(device_uuid=device_uuid).one_or_none()
+            #.options(selectinload(DeviceModel.components))\
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -163,7 +171,9 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]:
             endpoint_updates = session.execute(stmt).fetchall()
             updated_endpoints = any([(updated_at > created_at) for created_at,updated_at in endpoint_updates])
 
-        if len(related_topologies) > 0:
+        if not updated or len(related_topologies) > 1:
+            # Only update topology-device relations when the device is being created (not updated) or when its
+            # endpoints are modified (len(related_topologies) > 1).
             session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing(
                 index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid]
             ))
@@ -182,3 +192,22 @@ def device_delete(db_engine : Engine, request : DeviceId) -> Tuple[Dict, bool]:
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_device_id(device_uuid),deleted
+
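+# Fetch only the devices named in the filter, eagerly loading just the attribute groups
+# whose include_* flags are set so the dumped payload stays small.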
+def device_select(db_engine : Engine, request : DeviceFilter) -> List[Dict]:
+    device_uuids = [
+        device_get_uuid(device_id, allow_random=False)
+        for device_id in request.device_ids.device_ids
+    ]
+    dump_params = dict(
+        include_endpoints   =request.include_endpoints,
+        include_config_rules=request.include_config_rules,
+        include_components  =request.include_components,
+    )
+    def callback(session : Session) -> List[Dict]:
+        query = session.query(DeviceModel)
+        if request.include_endpoints   : query = query.options(selectinload(DeviceModel.endpoints))
+        if request.include_config_rules: query = query.options(selectinload(DeviceModel.config_rules))
+        #if request.include_components  : query = query.options(selectinload(DeviceModel.components))
+        obj_list : List[DeviceModel] = query.filter(DeviceModel.device_uuid.in_(device_uuids)).all()
+        return [obj.dump(**dump_params) for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/EndPoint.py b/src/context/service/database/EndPoint.py
index e2f86893abdf62c9675a83b2a80ceed1227b85d4..b0df3bb8101a7b64a148e916178b1c9a77d511af 100644
--- a/src/context/service/database/EndPoint.py
+++ b/src/context/service/database/EndPoint.py
@@ -14,7 +14,7 @@
 
 import logging
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List
 from common.proto.context_pb2 import EndPointIdList
@@ -29,7 +29,8 @@ def endpoint_list_names(db_engine : Engine, request : EndPointIdList) -> List[Di
         for endpoint_id in request.endpoint_ids
     }
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[EndPointModel] = \
-            session.query(EndPointModel).filter(EndPointModel.endpoint_uuid.in_(endpoint_uuids)).all()
+        obj_list : List[EndPointModel] = session.query(EndPointModel)\
+            .options(selectinload(EndPointModel.device))\
+            .filter(EndPointModel.endpoint_uuid.in_(endpoint_uuids)).all()
         return [obj.dump_name() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py
index 299827dbdde6f9961d55be5f69f4e359f4e876a7..f5bfc9dea5fb81fa8becfedc8ce1e4e0f59e7292 100644
--- a/src/context/service/database/Link.py
+++ b/src/context/service/database/Link.py
@@ -15,7 +15,7 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.proto.context_pb2 import Link, LinkId
@@ -36,14 +36,18 @@ def link_list_ids(db_engine : Engine) -> List[Dict]:
 
 def link_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[LinkModel] = session.query(LinkModel).all()
+        obj_list : List[LinkModel] = session.query(LinkModel)\
+            .options(selectinload(LinkModel.link_endpoints))\
+            .all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def link_get(db_engine : Engine, request : LinkId) -> Dict:
     link_uuid = link_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[LinkModel] = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none()
+        obj : Optional[LinkModel] = session.query(LinkModel)\
+            .options(selectinload(LinkModel.link_endpoints))\
+            .filter_by(link_uuid=link_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py
index e95cec4ae533795b23b8fd4e2f26ac9000c1bcce..13f0a2698c17874e1e15f4d6a1d527d366141f56 100644
--- a/src/context/service/database/PolicyRule.py
+++ b/src/context/service/database/PolicyRule.py
@@ -15,7 +15,7 @@
 import datetime, json
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.proto.policy_pb2 import PolicyRule, PolicyRuleId, PolicyRuleIdList, PolicyRuleList
@@ -31,14 +31,15 @@ from .uuids.Service import service_get_uuid
 def policyrule_list_ids(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all()
-        #.options(selectinload(PolicyRuleModel.topology)).filter_by(context_uuid=context_uuid).one_or_none()
         return [obj.dump_id() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def policyrule_list_objs(db_engine : Engine) -> List[Dict]:
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all()
-        #.options(selectinload(PolicyRuleModel.topology)).filter_by(context_uuid=context_uuid).one_or_none()
+        obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel)\
+            .options(selectinload(PolicyRuleModel.policyrule_service))\
+            .options(selectinload(PolicyRuleModel.policyrule_devices))\
+            .all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
@@ -46,6 +47,8 @@ def policyrule_get(db_engine : Engine, request : PolicyRuleId) -> PolicyRule:
     policyrule_uuid = policyrule_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[PolicyRuleModel] = session.query(PolicyRuleModel)\
+            .options(selectinload(PolicyRuleModel.policyrule_service))\
+            .options(selectinload(PolicyRuleModel.policyrule_devices))\
             .filter_by(policyrule_uuid=policyrule_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py
index fe12eaf8a011e2ae4861c64cdfac8c4b9c388731..32484a3095c3d937392f580597339fe047d36e3f 100644
--- a/src/context/service/database/Service.py
+++ b/src/context/service/database/Service.py
@@ -15,10 +15,10 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
-from common.proto.context_pb2 import ContextId, Service, ServiceId
+from common.proto.context_pb2 import ContextId, Service, ServiceFilter, ServiceId
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Service import json_service_id
@@ -43,14 +43,22 @@ def service_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
 def service_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all()
+        obj_list : List[ServiceModel] = session.query(ServiceModel)\
+            .options(selectinload(ServiceModel.service_endpoints))\
+            .options(selectinload(ServiceModel.constraints))\
+            .options(selectinload(ServiceModel.config_rules))\
+            .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def service_get(db_engine : Engine, request : ServiceId) -> Dict:
     _,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[ServiceModel] = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none()
+        obj : Optional[ServiceModel] = session.query(ServiceModel)\
+            .options(selectinload(ServiceModel.service_endpoints))\
+            .options(selectinload(ServiceModel.constraints))\
+            .options(selectinload(ServiceModel.config_rules))\
+            .filter_by(service_uuid=service_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -145,3 +153,22 @@ def service_delete(db_engine : Engine, request : ServiceId) -> Tuple[Dict, bool]
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_service_id(service_uuid, json_context_id(context_uuid)),deleted
+
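+# Note: service_get_uuid() returns a (context_uuid, service_uuid) tuple; only the service UUID is needed here.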
+def service_select(db_engine : Engine, request : ServiceFilter) -> List[Dict]:
+    service_uuids = [
+        service_get_uuid(service_id, allow_random=False)[1]
+        for service_id in request.service_ids.service_ids
+    ]
+    dump_params = dict(
+        include_endpoint_ids=request.include_endpoint_ids,
+        include_constraints =request.include_constraints,
+        include_config_rules=request.include_config_rules,
+    )
+    def callback(session : Session) -> List[Dict]:
+        query = session.query(ServiceModel)
+        if request.include_endpoint_ids: query = query.options(selectinload(ServiceModel.service_endpoints))
+        if request.include_constraints : query = query.options(selectinload(ServiceModel.constraints))
+        if request.include_config_rules: query = query.options(selectinload(ServiceModel.config_rules))
+        obj_list : List[ServiceModel] = query.filter(ServiceModel.service_uuid.in_(service_uuids)).all()
+        return [obj.dump(**dump_params) for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py
index 724046bfae16ea8e75ba84b83ff52c1050242003..abd140024f2a13289c7af6a3bafe363a8247e053 100644
--- a/src/context/service/database/Slice.py
+++ b/src/context/service/database/Slice.py
@@ -16,10 +16,10 @@ import datetime, logging
 from sqlalchemy import and_
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
-from common.proto.context_pb2 import ContextId, Slice, SliceId
+from common.proto.context_pb2 import ContextId, Slice, SliceFilter, SliceId
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Slice import json_slice_id
@@ -44,14 +44,26 @@ def slice_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
 def slice_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[SliceModel] = session.query(SliceModel).filter_by(context_uuid=context_uuid).all()
+        obj_list : List[SliceModel] = session.query(SliceModel)\
+            .options(selectinload(SliceModel.slice_endpoints))\
+            .options(selectinload(SliceModel.slice_services))\
+            .options(selectinload(SliceModel.slice_subslices))\
+            .options(selectinload(SliceModel.constraints))\
+            .options(selectinload(SliceModel.config_rules))\
+            .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
 def slice_get(db_engine : Engine, request : SliceId) -> Dict:
     _,slice_uuid = slice_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
-        obj : Optional[SliceModel] = session.query(SliceModel).filter_by(slice_uuid=slice_uuid).one_or_none()
+        obj : Optional[SliceModel] = session.query(SliceModel)\
+            .options(selectinload(SliceModel.slice_endpoints))\
+            .options(selectinload(SliceModel.slice_services))\
+            .options(selectinload(SliceModel.slice_subslices))\
+            .options(selectinload(SliceModel.constraints))\
+            .options(selectinload(SliceModel.config_rules))\
+            .filter_by(slice_uuid=slice_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -240,3 +252,26 @@ def slice_delete(db_engine : Engine, request : SliceId) -> Tuple[Dict, bool]:
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_slice_id(slice_uuid, json_context_id(context_uuid)),deleted
+
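+# Same pattern as device_select()/service_select(), extended with the slice-specific
+# service and subslice relations.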
+def slice_select(db_engine : Engine, request : SliceFilter) -> List[Dict]:
+    slice_uuids = [
+        slice_get_uuid(slice_id, allow_random=False)[1]
+        for slice_id in request.slice_ids.slice_ids
+    ]
+    dump_params = dict(
+        include_endpoint_ids=request.include_endpoint_ids,
+        include_constraints =request.include_constraints,
+        include_service_ids =request.include_service_ids,
+        include_subslice_ids=request.include_subslice_ids,
+        include_config_rules=request.include_config_rules,
+    )
+    def callback(session : Session) -> List[Dict]:
+        query = session.query(SliceModel)
+        if request.include_endpoint_ids: query = query.options(selectinload(SliceModel.slice_endpoints))
+        if request.include_service_ids : query = query.options(selectinload(SliceModel.slice_services))
+        if request.include_subslice_ids: query = query.options(selectinload(SliceModel.slice_subslices))
+        if request.include_constraints : query = query.options(selectinload(SliceModel.constraints))
+        if request.include_config_rules: query = query.options(selectinload(SliceModel.config_rules))
+        obj_list : List[SliceModel] = query.filter(SliceModel.slice_uuid.in_(slice_uuids)).all()
+        return [obj.dump(**dump_params) for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/context/service/database/Topology.py b/src/context/service/database/Topology.py
index e2c6e2e996ac9321d0d8b9ae2ecea018b650632f..4440299b63f68613854e79998270872389d385cb 100644
--- a/src/context/service/database/Topology.py
+++ b/src/context/service/database/Topology.py
@@ -15,14 +15,16 @@
 import datetime, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
 from common.proto.context_pb2 import ContextId, Topology, TopologyId
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology_id
-from .models.TopologyModel import TopologyModel
+from .models.DeviceModel import DeviceModel
+from .models.LinkModel import LinkModel
+from .models.TopologyModel import TopologyDeviceModel, TopologyLinkModel, TopologyModel
 from .uuids.Context import context_get_uuid
 from .uuids.Topology import topology_get_uuid
 
@@ -38,7 +40,10 @@ def topology_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
 def topology_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
-        obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all()
+        obj_list : List[TopologyModel] = session.query(TopologyModel)\
+            .options(selectinload(TopologyModel.topology_devices))\
+            .options(selectinload(TopologyModel.topology_links))\
+            .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
     return run_transaction(sessionmaker(bind=db_engine), callback)
 
@@ -46,6 +51,8 @@ def topology_get(db_engine : Engine, request : TopologyId) -> Dict:
     _,topology_uuid = topology_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[TopologyModel] = session.query(TopologyModel)\
+            .options(selectinload(TopologyModel.topology_devices))\
+            .options(selectinload(TopologyModel.topology_links))\
             .filter_by(topology_uuid=topology_uuid).one_or_none()
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
@@ -62,7 +69,10 @@ def topology_get_details(db_engine : Engine, request : TopologyId) -> Dict:
     _,topology_uuid = topology_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[TopologyModel] = session.query(TopologyModel)\
+            .options(selectinload(TopologyModel.topology_devices, TopologyDeviceModel.device, DeviceModel.endpoints))\
+            .options(selectinload(TopologyModel.topology_links, TopologyLinkModel.link, LinkModel.link_endpoints))\
             .filter_by(topology_uuid=topology_uuid).one_or_none()
+            #.options(selectinload(DeviceModel.components))\
         return None if obj is None else obj.dump_details()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py
index 156e33c6bb32e237af241035f1d9672b0b419222..f71d4177893d146af2f413781b51930c9909d827 100644
--- a/src/context/service/database/models/ConnectionModel.py
+++ b/src/context/service/database/models/ConnectionModel.py
@@ -59,8 +59,8 @@ class ConnectionEndPointModel(_Base):
     endpoint_uuid   = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position        = Column(Integer, nullable=False)
 
-    connection = relationship('ConnectionModel', back_populates='connection_endpoints', lazy='joined')
-    endpoint   = relationship('EndPointModel',   lazy='joined') # back_populates='connection_endpoints'
+    connection = relationship('ConnectionModel', back_populates='connection_endpoints') #, lazy='joined'
+    endpoint   = relationship('EndPointModel',   lazy='selectin') # back_populates='connection_endpoints'
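+    # lazy='selectin' batches the child loads into a single IN-query per relationship,
+    # avoiding the row-multiplying JOINs produced by the previous lazy='joined'.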
 
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
@@ -72,5 +72,5 @@ class ConnectionSubServiceModel(_Base):
     connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
     subservice_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
-    connection = relationship('ConnectionModel', back_populates='connection_subservices', lazy='joined')
-    subservice = relationship('ServiceModel',    lazy='joined') # back_populates='connection_subservices'
+    connection = relationship('ConnectionModel', back_populates='connection_subservices') #, lazy='joined'
+    subservice = relationship('ServiceModel',    lazy='selectin') # back_populates='connection_subservices'
diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py
index 2124386d16e2e33aec58f5b39bf0f89e3c6589f1..24130841d2bafde3608f2fa1cbdd476d28acba46 100644
--- a/src/context/service/database/models/DeviceModel.py
+++ b/src/context/service/database/models/DeviceModel.py
@@ -16,7 +16,7 @@ import operator
 from sqlalchemy import Column, DateTime, Enum, String
 from sqlalchemy.dialects.postgresql import ARRAY, UUID
 from sqlalchemy.orm import relationship
-from typing import Dict
+from typing import Dict, List
 from .enums.DeviceDriver import ORM_DeviceDriverEnum
 from .enums.DeviceOperationalStatus import ORM_DeviceOperationalStatusEnum
 from ._Base import _Base
@@ -39,19 +39,29 @@ class DeviceModel(_Base):
     def dump_id(self) -> Dict:
         return {'device_uuid': {'uuid': self.device_uuid}}
 
-    def dump(self) -> Dict:
-        return {
+    def dump_endpoints(self) -> List[Dict]:
+        return [endpoint.dump() for endpoint in self.endpoints]
+
+    def dump_config_rules(self) -> Dict:
+        return {'config_rules': [
+            config_rule.dump()
+            for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+        ]}
+
+    #def dump_components(self) -> List[Dict]:
+    #    return []
+
+    def dump(self,
+        include_endpoints : bool = True, include_config_rules : bool = True, include_components : bool = True,
+    ) -> Dict:
+        result = {
             'device_id'                : self.dump_id(),
             'name'                     : self.device_name,
             'device_type'              : self.device_type,
             'device_operational_status': self.device_operational_status.value,
             'device_drivers'           : [driver.value for driver in self.device_drivers],
-            'device_config'            : {'config_rules': [
-                config_rule.dump()
-                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
-            ]},
-            'device_endpoints'         : [
-                endpoint.dump()
-                for endpoint in self.endpoints
-            ],
         }
+        if include_endpoints   : result['device_endpoints'] = self.dump_endpoints()
+        if include_config_rules: result['device_config'   ] = self.dump_config_rules()
+        #if include_components : result['components'      ] = self.dump_components()
+        return result
diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py
index 12ba7e10e7c3d5789f9bf16ad7b4f50c35a36bf5..a079f9900e39fdf3a4329e604f4e596e7f5d1f89 100644
--- a/src/context/service/database/models/EndPointModel.py
+++ b/src/context/service/database/models/EndPointModel.py
@@ -31,8 +31,8 @@ class EndPointModel(_Base):
     created_at       = Column(DateTime, nullable=False)
     updated_at       = Column(DateTime, nullable=False)
 
-    device            = relationship('DeviceModel',          back_populates='endpoints')
-    topology          = relationship('TopologyModel')
+    device            = relationship('DeviceModel',          back_populates='endpoints') # lazy='selectin'
+    topology          = relationship('TopologyModel', lazy='selectin')
     #link_endpoints    = relationship('LinkEndPointModel',    back_populates='endpoint' )
     #service_endpoints = relationship('ServiceEndPointModel', back_populates='endpoint' )
 
diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py
index e9fd9bc8742222e0934a76b6e0ffa4acb1b71f40..9c16da3c9146f28352e8b4f7a6f9ab85f870c8b7 100644
--- a/src/context/service/database/models/LinkModel.py
+++ b/src/context/service/database/models/LinkModel.py
@@ -50,8 +50,8 @@ class LinkEndPointModel(_Base):
     endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position      = Column(Integer, nullable=False)
 
-    link     = relationship('LinkModel',     back_populates='link_endpoints', lazy='joined')
-    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints'
+    link     = relationship('LinkModel',     back_populates='link_endpoints') #, lazy='selectin'
+    endpoint = relationship('EndPointModel', lazy='selectin') # back_populates='link_endpoints'
 
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
diff --git a/src/context/service/database/models/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py
index 663a9a39a30903b3dd41ccfee56da19528325af0..32364e289cf68fe760c60eb27cde933f7cf448a4 100644
--- a/src/context/service/database/models/PolicyRuleModel.py
+++ b/src/context/service/database/models/PolicyRuleModel.py
@@ -74,4 +74,4 @@ class PolicyRuleDeviceModel(_Base):
     device_uuid     = Column(ForeignKey('device.device_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     #policyrule = relationship('PolicyRuleModel', lazy='joined') # back_populates='policyrule_devices'
-    device     = relationship('DeviceModel',     lazy='joined') # back_populates='policyrule_devices'
+    device     = relationship('DeviceModel',     lazy='selectin') # back_populates='policyrule_devices'
diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py
index f1781c4f86fa25e8d9f3e42da46451e112ef779e..ef6e1b06aaaa616ede6f9633e4e0d7fc0aabf336 100644
--- a/src/context/service/database/models/ServiceModel.py
+++ b/src/context/service/database/models/ServiceModel.py
@@ -16,7 +16,7 @@ import operator
 from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String
 from sqlalchemy.dialects.postgresql import UUID
 from sqlalchemy.orm import relationship
-from typing import Dict
+from typing import Dict, List
 from .enums.ServiceStatus import ORM_ServiceStatusEnum
 from .enums.ServiceType import ORM_ServiceTypeEnum
 from ._Base import _Base
@@ -32,10 +32,10 @@ class ServiceModel(_Base):
     created_at     = Column(DateTime, nullable=False)
     updated_at     = Column(DateTime, nullable=False)
 
-    context           = relationship('ContextModel', back_populates='services')
-    service_endpoints = relationship('ServiceEndPointModel') # lazy='joined', back_populates='service'
-    constraints       = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='service'
-    config_rules      = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='service'
+    context           = relationship('ContextModel', back_populates='services', lazy='selectin')
+    service_endpoints = relationship('ServiceEndPointModel') # lazy='selectin', back_populates='service'
+    constraints       = relationship('ConstraintModel', passive_deletes=True) # lazy='selectin', back_populates='service'
+    config_rules      = relationship('ConfigRuleModel', passive_deletes=True) # lazy='selectin', back_populates='service'
 
     def dump_id(self) -> Dict:
         return {
@@ -43,25 +43,37 @@ class ServiceModel(_Base):
             'service_uuid': {'uuid': self.service_uuid},
         }
 
-    def dump(self) -> Dict:
-        return {
-            'service_id'          : self.dump_id(),
-            'name'                : self.service_name,
-            'service_type'        : self.service_type.value,
-            'service_status'      : {'service_status': self.service_status.value},
-            'service_endpoint_ids': [
-                service_endpoint.endpoint.dump_id()
-                for service_endpoint in sorted(self.service_endpoints, key=operator.attrgetter('position'))
-            ],
-            'service_constraints' : [
-                constraint.dump()
-                for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
-            ],
-            'service_config'      : {'config_rules': [
-                config_rule.dump()
-                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
-            ]},
+    def dump_endpoint_ids(self) -> List[Dict]:
+        return [
+            service_endpoint.endpoint.dump_id()
+            for service_endpoint in sorted(self.service_endpoints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_constraints(self) -> List[Dict]:
+        return [
+            constraint.dump()
+            for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_config_rules(self) -> Dict:
+        return {'config_rules': [
+            config_rule.dump()
+            for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+        ]}
+
+    def dump(
+        self, include_endpoint_ids : bool = True, include_constraints : bool = True, include_config_rules : bool = True
+    ) -> Dict:
+        result = {
+            'service_id'    : self.dump_id(),
+            'name'          : self.service_name,
+            'service_type'  : self.service_type.value,
+            'service_status': {'service_status': self.service_status.value},
         }
+        if include_endpoint_ids: result['service_endpoint_ids'] = self.dump_endpoint_ids()
+        if include_constraints : result['service_constraints' ] = self.dump_constraints()
+        if include_config_rules: result['service_config'      ] = self.dump_config_rules()
+        return result
 
 class ServiceEndPointModel(_Base):
     __tablename__ = 'service_endpoint'
@@ -70,8 +82,8 @@ class ServiceEndPointModel(_Base):
     endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position      = Column(Integer, nullable=False)
 
-    service  = relationship('ServiceModel',  back_populates='service_endpoints', lazy='joined')
-    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints'
+    service  = relationship('ServiceModel',  back_populates='service_endpoints') # lazy='selectin'
+    endpoint = relationship('EndPointModel', lazy='selectin') # back_populates='service_endpoints'
 
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py
index 7f1550eb2ebb80962bac94374112d43785184374..423af244e186301cf3132eea3fc7cbea16bf9fe9 100644
--- a/src/context/service/database/models/SliceModel.py
+++ b/src/context/service/database/models/SliceModel.py
@@ -16,7 +16,7 @@ import operator
 from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String
 from sqlalchemy.dialects.postgresql import UUID
 from sqlalchemy.orm import relationship
-from typing import Dict
+from typing import Dict, List
 from .enums.SliceStatus import ORM_SliceStatusEnum
 from ._Base import _Base
 
@@ -32,13 +32,13 @@ class SliceModel(_Base):
     created_at         = Column(DateTime, nullable=False)
     updated_at         = Column(DateTime, nullable=False)
 
-    context         = relationship('ContextModel', back_populates='slices')
-    slice_endpoints = relationship('SliceEndPointModel') # lazy='joined', back_populates='slice'
-    slice_services  = relationship('SliceServiceModel') # lazy='joined', back_populates='slice'
+    context         = relationship('ContextModel', back_populates='slices', lazy='selectin')
+    slice_endpoints = relationship('SliceEndPointModel') # lazy='selectin', back_populates='slice'
+    slice_services  = relationship('SliceServiceModel') # lazy='selectin', back_populates='slice'
     slice_subslices = relationship(
         'SliceSubSliceModel', primaryjoin='slice.c.slice_uuid == slice_subslice.c.slice_uuid')
-    constraints     = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='slice'
-    config_rules    = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='slice'
+    constraints     = relationship('ConstraintModel', passive_deletes=True) # lazy='selectin', back_populates='slice'
+    config_rules    = relationship('ConfigRuleModel', passive_deletes=True) # lazy='selectin', back_populates='slice'
 
     def dump_id(self) -> Dict:
         return {
@@ -46,36 +46,59 @@ class SliceModel(_Base):
             'slice_uuid': {'uuid': self.slice_uuid},
         }
 
-    def dump(self) -> Dict:
+    def dump_endpoint_ids(self) -> List[Dict]:
+        return [
+            slice_endpoint.endpoint.dump_id()
+            for slice_endpoint in sorted(self.slice_endpoints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_constraints(self) -> List[Dict]:
+        return [
+            constraint.dump()
+            for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
+        ]
+
+    def dump_config_rules(self) -> Dict:
+        return {'config_rules': [
+            config_rule.dump()
+            for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+        ]}
+
+    def dump_service_ids(self) -> List[Dict]:
+        return [
+            slice_service.service.dump_id()
+            for slice_service in self.slice_services
+        ]
+
+    def dump_subslice_ids(self) -> List[Dict]:
+        return [
+            slice_subslice.subslice.dump_id()
+            for slice_subslice in self.slice_subslices
+        ]
+
+    def dump_owner_id(self) -> Dict:
         return {
-            'slice_id'          : self.dump_id(),
-            'name'              : self.slice_name,
-            'slice_status'      : {'slice_status': self.slice_status.value},
-            'slice_endpoint_ids': [
-                slice_endpoint.endpoint.dump_id()
-                for slice_endpoint in sorted(self.slice_endpoints, key=operator.attrgetter('position'))
-            ],
-            'slice_constraints' : [
-                constraint.dump()
-                for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
-            ],
-            'slice_config'      : {'config_rules': [
-                config_rule.dump()
-                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
-            ]},
-            'slice_service_ids': [
-                slice_service.service.dump_id()
-                for slice_service in self.slice_services
-            ],
-            'slice_subslice_ids': [
-                slice_subslice.subslice.dump_id()
-                for slice_subslice in self.slice_subslices
-            ],
-            'slice_owner': {
-                'owner_uuid': {'uuid': self.slice_owner_uuid},
-                'owner_string': self.slice_owner_string
-            }
+            'owner_uuid': {'uuid': self.slice_owner_uuid},
+            'owner_string': self.slice_owner_string
+        }
+
+    def dump(
+        self, include_endpoint_ids : bool = True, include_constraints : bool = True, include_service_ids : bool = True,
+        include_subslice_ids : bool = True, include_config_rules : bool = True
+    ) -> Dict:
+        result = {
+            'slice_id'    : self.dump_id(),
+            'name'        : self.slice_name,
+            'slice_status': {'slice_status': self.slice_status.value},
+            'slice_owner' : self.dump_owner_id()
         }
+        if include_endpoint_ids: result['slice_endpoint_ids'] = self.dump_endpoint_ids()
+        if include_constraints : result['slice_constraints' ] = self.dump_constraints()
+        if include_service_ids : result['slice_service_ids' ] = self.dump_service_ids()
+        if include_subslice_ids: result['slice_subslice_ids'] = self.dump_subslice_ids()
+        if include_config_rules: result['slice_config'      ] = self.dump_config_rules()
+        return result
 
 class SliceEndPointModel(_Base):
     __tablename__ = 'slice_endpoint'
@@ -84,8 +107,8 @@ class SliceEndPointModel(_Base):
     endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position      = Column(Integer, nullable=False)
 
-    slice    = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined')
-    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints'
+    slice    = relationship('SliceModel', back_populates='slice_endpoints') #, lazy='selectin'
+    endpoint = relationship('EndPointModel', lazy='selectin') # back_populates='slice_endpoints'
 
     __table_args__ = (
         CheckConstraint(position >= 0, name='check_position_value'),
@@ -97,8 +120,8 @@ class SliceServiceModel(_Base):
     slice_uuid   = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE' ), primary_key=True)
     service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
-    slice   = relationship('SliceModel', back_populates='slice_services', lazy='joined')
-    service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services'
+    slice   = relationship('SliceModel', back_populates='slice_services') # , lazy='selectin'
+    service = relationship('ServiceModel', lazy='selectin') # back_populates='slice_services'
 
 class SliceSubSliceModel(_Base):
     __tablename__ = 'slice_subslice'
@@ -107,5 +130,5 @@ class SliceSubSliceModel(_Base):
     subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True)
 
     slice    = relationship(
-        'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined')
-    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='joined')
+        'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices') #, lazy='selectin'
+    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='selectin')
diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py
index 7dc2333f0a9b979f251c173d850a235dcb822d91..0ed4a038bcf4426f4cf112bd03c5cb36cb42c822 100644
--- a/src/context/service/database/models/TopologyModel.py
+++ b/src/context/service/database/models/TopologyModel.py
@@ -27,7 +27,7 @@ class TopologyModel(_Base):
     created_at    = Column(DateTime, nullable=False)
     updated_at    = Column(DateTime, nullable=False)
 
-    context          = relationship('ContextModel', back_populates='topologies')
+    context          = relationship('ContextModel', back_populates='topologies', lazy='selectin')
     topology_devices = relationship('TopologyDeviceModel') # back_populates='topology'
     topology_links   = relationship('TopologyLinkModel'  ) # back_populates='topology'
 
@@ -46,11 +46,19 @@ class TopologyModel(_Base):
         }
 
     def dump_details(self) -> Dict:
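+        # Devices are dumped without config rules (components are not modeled yet) to keep
+        # the topology-details payload small.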
+        devices = [
+            td.device.dump(include_config_rules=False, include_components=False)
+            for td in self.topology_devices
+        ]
+        links = [
+            tl.link.dump()
+            for tl in self.topology_links
+        ]
         return {
             'topology_id': self.dump_id(),
             'name'       : self.topology_name,
-            'devices'    : [td.device.dump() for td in self.topology_devices],
-            'links'      : [tl.link.dump()   for tl in self.topology_links  ],
+            'devices'    : devices,
+            'links'      : links,
         }
 
 class TopologyDeviceModel(_Base):
@@ -59,8 +67,8 @@ class TopologyDeviceModel(_Base):
     topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True, index=True)
 
-    #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices'
-    device   = relationship('DeviceModel',   lazy='joined') # back_populates='topology_devices'
+    #topology = relationship('TopologyModel', lazy='selectin') # back_populates='topology_devices'
+    device   = relationship('DeviceModel',   lazy='selectin') # back_populates='topology_devices'
 
 class TopologyLinkModel(_Base):
     __tablename__ = 'topology_link'
@@ -68,5 +76,5 @@ class TopologyLinkModel(_Base):
     topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True, index=True)
 
-    #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links'
-    link     = relationship('LinkModel',     lazy='joined') # back_populates='topology_links'
+    #topology = relationship('TopologyModel', lazy='selectin') # back_populates='topology_links'
+    link     = relationship('LinkModel',     lazy='selectin') # back_populates='topology_links'
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
index 2b08b6c7e03cfd50557f25f99ffea3032dbb811e..205d769acb76992aeba33fc54b7e7b8fbbdc8d06 100644
--- a/src/device/service/DeviceServiceServicerImpl.py
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -160,6 +160,12 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
                 for error in errors: LOGGER.error(error)
                 raise OperationFailedException('ConfigureDevice', extra_details=errors)
 
+            # Context Performance+Scalability enhancement:
+            # Apart from the P4 logic, this method does not add/update/delete endpoints.
+            # Drop the endpoints here to reduce the number of inserts performed by Context.
+            # TODO: Add logic to inspect endpoints and keep only those modified with respect to Context.
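+            # del <repeated_field>[:] clears a protobuf repeated field in place.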
+            del device.device_endpoints[:]
+
             # Note: Rules are updated by configure_rules() and deconfigure_rules() methods.
             device_id = context_client.SetDevice(device)
             return device_id
diff --git a/src/monitoring/service/MetricsDBTools.py b/src/monitoring/service/MetricsDBTools.py
index 6b98255411aa88ac18bd01474830b3bf268d3483..f928f07b94c71fb6f378161862e96d41af8bde7f 100644
--- a/src/monitoring/service/MetricsDBTools.py
+++ b/src/monitoring/service/MetricsDBTools.py
@@ -264,68 +264,65 @@ class MetricsDB():
                 for kpi in kpi_list:
                     alarm = False
                     kpi_value = kpi[2]
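+                    # Treat NaN thresholds the same as unset (None) ones so the branches below
+                    # only compare kpi_value against real bounds.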
+                    kpiMinIsNone = ((kpiMinValue is None) or math.isnan(kpiMinValue))
+                    kpiMaxIsNone = ((kpiMaxValue is None) or math.isnan(kpiMaxValue))
                     if (kpiMinValue == kpi_value and kpiMaxValue == kpi_value and inRange):
                         alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and includeMaxValue):
                         if (kpi_value >= kpiMinValue and kpi_value <= kpiMaxValue):
                             alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and not includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and not includeMaxValue):
                         if (kpi_value >= kpiMinValue and kpi_value < kpiMaxValue):
                             alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and includeMaxValue):
                         if (kpi_value > kpiMinValue and kpi_value <= kpiMaxValue):
                             alarm = True
-                    elif (
-                            inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and not includeMaxValue):
+                    elif (inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and not includeMaxValue):
                         if (kpi_value > kpiMinValue and kpi_value < kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and includeMaxValue):
                         if (kpi_value <= kpiMinValue or kpi_value >= kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and not includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and includeMinValue and not includeMaxValue):
                         if (kpi_value <= kpiMinValue or kpi_value > kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and includeMaxValue):
                         if (kpi_value < kpiMinValue or kpi_value >= kpiMaxValue):
                             alarm = True
-                    elif (
-                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and not includeMaxValue):
+                    elif (not inRange and not kpiMinIsNone and not kpiMaxIsNone and not includeMinValue and not includeMaxValue):
                         if (kpi_value < kpiMinValue or kpi_value > kpiMaxValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is not None and kpiMaxValue is None and includeMinValue):
+                    elif (inRange and not kpiMinIsNone and kpiMaxIsNone and includeMinValue):
                         if (kpi_value >= kpiMinValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
+                    elif (inRange and not kpiMinIsNone and kpiMaxIsNone and not includeMinValue):
                         if (kpi_value > kpiMinValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
+                    elif (not inRange and not kpiMinIsNone and kpiMaxIsNone and includeMinValue):
                         if (kpi_value <= kpiMinValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
-                        if (kpi_value <= kpiMinValue):
+                    elif (not inRange and not kpiMinIsNone and kpiMaxIsNone and not includeMinValue):
+                        if (kpi_value < kpiMinValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is None and kpiMaxValue is not None and includeMaxValue):
+                    elif (inRange and kpiMinIsNone and not kpiMaxIsNone and includeMaxValue):
                         if (kpi_value <= kpiMaxValue):
                             alarm = True
-                    elif (inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
+                    elif (inRange and kpiMinIsNone and not kpiMaxIsNone and not includeMaxValue):
                         if (kpi_value < kpiMaxValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
+                    elif (not inRange and kpiMinIsNone and not kpiMaxIsNone and includeMaxValue):
                         if (kpi_value >= kpiMaxValue):
                             alarm = True
-                    elif (not inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
-                        if (kpi_value >= kpiMaxValue):
+                    elif (not inRange and kpiMinIsNone and not kpiMaxIsNone and not includeMaxValue):
+                        if (kpi_value > kpiMaxValue):
                             alarm = True
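+                    # The elif chain above enumerates the combinations of inRange
+                    # and include{Min,Max}Value, mapping closed bounds to <=/>= and
+                    # open bounds to </>; kpiMinIsNone/kpiMaxIsNone select the
+                    # one-sided threshold cases.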
                     if alarm:
                         valid_kpi_list.append(kpi)
-                alarm_queue.put_nowait(valid_kpi_list)
-                LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> kpi_value:{kpi[2]}, timestamp:{kpi[1]}")
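+                # Only enqueue non-empty batches so subscribers are not woken
+                # up when no sample crossed the alarm thresholds.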
+                if valid_kpi_list:
+                    alarm_queue.put_nowait(valid_kpi_list)
+                    LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> kpi_value:{valid_kpi_list[-1][2]}, timestamp:{valid_kpi_list[-1][1]}")
+                else:
+                    LOGGER.debug(f"No new alarms triggered for the alarm of KPI {kpi_id}")
             else:
                 LOGGER.debug(f"No new data for the alarm of KPI {kpi_id}")
         except (Exception) as e:
diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py
index f408734df40c1bc5c16b7e108e3ce5a211165f71..3bfef65ff0c52f110b9a091e96b6f6b97dfa79cf 100644
--- a/src/monitoring/service/MonitoringServiceServicerImpl.py
+++ b/src/monitoring/service/MonitoringServiceServicerImpl.py
@@ -12,12 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os, grpc
+import logging, os, grpc
 from queue import Queue
-
 from typing import Iterator
-
-from common.logger import getJSONLogger
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 from common.proto.context_pb2 import Empty
 from common.proto.device_pb2 import MonitoringSettings
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
@@ -25,30 +23,22 @@ from common.proto.monitoring_pb2_grpc import MonitoringServiceServicer
 from common.proto.monitoring_pb2 import AlarmResponse, AlarmDescriptor, AlarmList, SubsList, KpiId, \
     KpiDescriptor, KpiList, KpiQuery, SubsDescriptor, SubscriptionID, AlarmID, KpiDescriptorList, \
     MonitorKpiRequest, Kpi, AlarmSubscription, SubsResponse, RawKpiTable, RawKpi, RawKpiList
-from common.method_wrappers.ServiceExceptions import ServiceException
 from common.tools.timestamp.Converters import timestamp_string_to_float, timestamp_utcnow_to_float
-
-from monitoring.service import ManagementDBTools, MetricsDBTools
 from device.client.DeviceClient import DeviceClient
-
-from prometheus_client import Counter, Summary
-
+from monitoring.service import ManagementDBTools, MetricsDBTools
 from monitoring.service.AlarmManager import AlarmManager
 from monitoring.service.NameMapping import NameMapping
 from monitoring.service.SubscriptionManager import SubscriptionManager
 
-LOGGER = getJSONLogger('monitoringservice-server')
-LOGGER.setLevel('DEBUG')
-
-MONITORING_GETINSTANTKPI_REQUEST_TIME = Summary(
-    'monitoring_getinstantkpi_processing_seconds', 'Time spent processing monitoring instant kpi request')
-MONITORING_INCLUDEKPI_COUNTER = Counter('monitoring_includekpi_counter', 'Monitoring include kpi request counter')
+LOGGER = logging.getLogger(__name__)
 
 METRICSDB_HOSTNAME = os.environ.get("METRICSDB_HOSTNAME")
 METRICSDB_ILP_PORT = os.environ.get("METRICSDB_ILP_PORT")
 METRICSDB_REST_PORT = os.environ.get("METRICSDB_REST_PORT")
 METRICSDB_TABLE_MONITORING_KPIS = os.environ.get("METRICSDB_TABLE_MONITORING_KPIS")
 
+METRICS_POOL = MetricsPool('Monitoring', 'RPC')
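+# Every RPC below is wrapped with @safe_and_metered_rpc_method, which records
+# request/duration metrics in METRICS_POOL and turns uncaught exceptions into
+# gRPC aborts, replacing the per-method try/except blocks removed below.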
+
 class MonitoringServiceServicerImpl(MonitoringServiceServicer):
     def __init__(self, name_mapping : NameMapping):
         LOGGER.info('Init monitoringService')
@@ -63,514 +53,363 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         LOGGER.info('MetricsDB initialized')
 
     # SetKpi (SetKpiRequest) returns (KpiId) {}
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetKpi(
             self, request: KpiDescriptor, grpc_context: grpc.ServicerContext
     ) -> KpiId:
-        # CREATEKPI_COUNTER_STARTED.inc()
-        LOGGER.info('SetKpi')
-        try:
-            # Here the code to create a sqlite query to crete a KPI and return a KpiID
-            response = KpiId()
-
-            kpi_description = request.kpi_description
-            kpi_sample_type = request.kpi_sample_type
-            kpi_device_id = request.device_id.device_uuid.uuid
-            kpi_endpoint_id = request.endpoint_id.endpoint_uuid.uuid
-            kpi_service_id = request.service_id.service_uuid.uuid
-            kpi_slice_id = request.slice_id.slice_uuid.uuid
-            kpi_connection_id = request.connection_id.connection_uuid.uuid
-
-
-            if request.kpi_id.kpi_id.uuid != "":
-                response.kpi_id.uuid = request.kpi_id.kpi_id.uuid
-            #     Here the code to modify an existing kpi
-            else:
-                data = self.management_db.insert_KPI(
-                    kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id, kpi_slice_id, kpi_connection_id)
-                response.kpi_id.uuid = str(data)
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('SetKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('SetKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
+        response = KpiId()
+        kpi_description = request.kpi_description
+        kpi_sample_type = request.kpi_sample_type
+        kpi_device_id = request.device_id.device_uuid.uuid
+        kpi_endpoint_id = request.endpoint_id.endpoint_uuid.uuid
+        kpi_service_id = request.service_id.service_uuid.uuid
+        kpi_slice_id = request.slice_id.slice_uuid.uuid
+        kpi_connection_id = request.connection_id.connection_uuid.uuid
+        if request.kpi_id.kpi_id.uuid != "":
+            response.kpi_id.uuid = request.kpi_id.kpi_id.uuid
+            # Here the code to modify an existing kpi
+        else:
+            data = self.management_db.insert_KPI(
+                kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id, kpi_slice_id,
+                kpi_connection_id)
+            response.kpi_id.uuid = str(data)
+        return response
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def DeleteKpi(self, request: KpiId, grpc_context: grpc.ServicerContext) -> Empty:
+        kpi_id = int(request.kpi_id.uuid)
+        kpi = self.management_db.get_KPI(kpi_id)
+        if kpi:
+            self.management_db.delete_KPI(kpi_id)
+        else:
+            LOGGER.info('DeleteKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+        return Empty()
 
-        LOGGER.info('DeleteKpi')
-        try:
-            LOGGER.debug(f'DeleteKpi with KpiID: {request.kpi_id.uuid}')
-            kpi_id = int(request.kpi_id.uuid)
-            kpi = self.management_db.get_KPI(kpi_id)
-            if kpi:
-                self.management_db.delete_KPI(kpi_id)
-            else:
-                LOGGER.info('DeleteKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
-            return Empty()
-        except ServiceException as e:
-            LOGGER.exception('DeleteKpi exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('DeleteKpi exception')
-
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetKpiDescriptor(self, request: KpiId, grpc_context: grpc.ServicerContext) -> KpiDescriptor:
-        LOGGER.info('getting Kpi by KpiID')
-        try:
-            kpi_id = request.kpi_id.uuid
-            kpi_db = self.management_db.get_KPI(int(kpi_id))
-            kpiDescriptor = KpiDescriptor()
-            if kpi_db is None:
-                LOGGER.info('GetKpiDescriptor error: KpiID({:s}): not found in database'.format(str(kpi_id)))
-            else:
-                kpiDescriptor.kpi_description                       = kpi_db[1]
-                kpiDescriptor.kpi_sample_type                       = kpi_db[2]
-                kpiDescriptor.device_id.device_uuid.uuid            = str(kpi_db[3])
-                kpiDescriptor.endpoint_id.endpoint_uuid.uuid        = str(kpi_db[4])
-                kpiDescriptor.service_id.service_uuid.uuid          = str(kpi_db[5])
-                kpiDescriptor.slice_id.slice_uuid.uuid              = str(kpi_db[6])
-                kpiDescriptor.connection_id.connection_uuid.uuid    = str(kpi_db[7])
-            return kpiDescriptor
-        except ServiceException as e:
-            LOGGER.exception('GetKpiDescriptor exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception:  # pragma: no cover
-            LOGGER.exception('GetKpiDescriptor exception')
-
+        kpi_id = request.kpi_id.uuid
+        kpi_db = self.management_db.get_KPI(int(kpi_id))
+        kpiDescriptor = KpiDescriptor()
+        if kpi_db is None:
+            LOGGER.info('GetKpiDescriptor error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+        else:
+            kpiDescriptor.kpi_description                       = kpi_db[1]
+            kpiDescriptor.kpi_sample_type                       = kpi_db[2]
+            kpiDescriptor.device_id.device_uuid.uuid            = str(kpi_db[3])
+            kpiDescriptor.endpoint_id.endpoint_uuid.uuid        = str(kpi_db[4])
+            kpiDescriptor.service_id.service_uuid.uuid          = str(kpi_db[5])
+            kpiDescriptor.slice_id.slice_uuid.uuid              = str(kpi_db[6])
+            kpiDescriptor.connection_id.connection_uuid.uuid    = str(kpi_db[7])
+        return kpiDescriptor
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetKpiDescriptorList(self, request: Empty, grpc_context: grpc.ServicerContext) -> KpiDescriptorList:
-
-        LOGGER.info('GetKpiDescriptorList')
-        try:
-            kpi_descriptor_list = KpiDescriptorList()
-
-            data = self.management_db.get_KPIS()
-            LOGGER.debug(f"data: {data}")
-
-            for item in data:
-                kpi_descriptor = KpiDescriptor()
-                kpi_descriptor.kpi_id.kpi_id.uuid                   = str(item[0])
-                kpi_descriptor.kpi_description                      = item[1]
-                kpi_descriptor.kpi_sample_type                      = item[2]
-                kpi_descriptor.device_id.device_uuid.uuid           = str(item[3])
-                kpi_descriptor.endpoint_id.endpoint_uuid.uuid       = str(item[4])
-                kpi_descriptor.service_id.service_uuid.uuid         = str(item[5])
-                kpi_descriptor.slice_id.slice_uuid.uuid             = str(item[6])
-                kpi_descriptor.connection_id.connection_uuid.uuid   = str(item[7])
-
-                kpi_descriptor_list.kpi_descriptor_list.append(kpi_descriptor)
-
-            return kpi_descriptor_list
-        except ServiceException as e:
-            LOGGER.exception('GetKpiDescriptorList exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetKpiDescriptorList exception')
-
+        kpi_descriptor_list = KpiDescriptorList()
+        data = self.management_db.get_KPIS()
+        LOGGER.debug(f"data: {data}")
+        for item in data:
+            kpi_descriptor = KpiDescriptor()
+            kpi_descriptor.kpi_id.kpi_id.uuid                   = str(item[0])
+            kpi_descriptor.kpi_description                      = item[1]
+            kpi_descriptor.kpi_sample_type                      = item[2]
+            kpi_descriptor.device_id.device_uuid.uuid           = str(item[3])
+            kpi_descriptor.endpoint_id.endpoint_uuid.uuid       = str(item[4])
+            kpi_descriptor.service_id.service_uuid.uuid         = str(item[5])
+            kpi_descriptor.slice_id.slice_uuid.uuid             = str(item[6])
+            kpi_descriptor.connection_id.connection_uuid.uuid   = str(item[7])
+            kpi_descriptor_list.kpi_descriptor_list.append(kpi_descriptor)
+        return kpi_descriptor_list
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def IncludeKpi(self, request: Kpi, grpc_context: grpc.ServicerContext) -> Empty:
+        kpi_id = request.kpi_id.kpi_id.uuid
+        kpiDescriptor = self.GetKpiDescriptor(request.kpi_id, grpc_context)
 
-        LOGGER.info('IncludeKpi')
-
-        try:
-            kpi_id = request.kpi_id.kpi_id.uuid
+        if kpiDescriptor is None:
+            LOGGER.info('IncludeKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+        else:
+            kpiSampleType = KpiSampleType.Name(kpiDescriptor.kpi_sample_type).upper().replace('KPISAMPLETYPE_', '')
+            kpiId = kpi_id
+            deviceId = kpiDescriptor.device_id.device_uuid.uuid
+            endpointId = kpiDescriptor.endpoint_id.endpoint_uuid.uuid
+            serviceId = kpiDescriptor.service_id.service_uuid.uuid
+            sliceId   = kpiDescriptor.slice_id.slice_uuid.uuid
+            connectionId = kpiDescriptor.connection_id.connection_uuid.uuid
+            time_stamp = request.timestamp.timestamp
+            kpi_value = getattr(request.kpi_value, request.kpi_value.WhichOneof('value'))
+
+            # Build the structure to be included as point in the MetricsDB
+            self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, sliceId, connectionId, kpi_value)
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def MonitorKpi(self, request: MonitorKpiRequest, grpc_context: grpc.ServicerContext) -> Empty:
+        kpi_id = int(request.kpi_id.kpi_id.uuid)
+        kpi = self.management_db.get_KPI(kpi_id)
+        response = Empty()
+        if kpi:
+            # Sets the request to send to the device service
+            monitor_device_request = MonitoringSettings()
             kpiDescriptor = self.GetKpiDescriptor(request.kpi_id, grpc_context)
+            monitor_device_request.kpi_descriptor.CopyFrom(kpiDescriptor)
+            monitor_device_request.kpi_id.kpi_id.uuid = request.kpi_id.kpi_id.uuid
+            monitor_device_request.sampling_duration_s = request.monitoring_window_s
+            monitor_device_request.sampling_interval_s = request.sampling_rate_s
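+            # Forward the request to the Device component only if this KPI is
+            # not already being monitored; the monitoring flag in the
+            # management DB prevents starting duplicate device-side collectors.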
+            if not self.management_db.check_monitoring_flag(kpi_id):
+                device_client = DeviceClient()
+                device_client.MonitorDeviceKpi(monitor_device_request)
+                self.management_db.set_monitoring_flag(kpi_id,True)
+                self.management_db.check_monitoring_flag(kpi_id)
+            else:
+                LOGGER.warning('MonitorKpi warning: KpiID({:s}) is currently being monitored'.format(str(kpi_id)))
+        else:
+            LOGGER.info('MonitorKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+        return response
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def QueryKpiData(self, request: KpiQuery, grpc_context: grpc.ServicerContext) -> RawKpiTable:
+        raw_kpi_table = RawKpiTable()
+        kpi_id_list             = request.kpi_ids
+        monitoring_window_s     = request.monitoring_window_s
+        last_n_samples          = request.last_n_samples
+        start_timestamp         = request.start_timestamp.timestamp
+        end_timestamp           = request.end_timestamp.timestamp
+
+        # Check if all the Kpi_ids exist
+        for item in kpi_id_list:
+            kpi_id = item.kpi_id.uuid
+            kpiDescriptor = self.GetKpiDescriptor(item, grpc_context)
             if kpiDescriptor is None:
-                LOGGER.info('IncludeKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+                LOGGER.info('QueryKpiData error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+                break
             else:
-                kpiSampleType = KpiSampleType.Name(kpiDescriptor.kpi_sample_type).upper().replace('KPISAMPLETYPE_', '')
-                kpiId = kpi_id
-                deviceId = kpiDescriptor.device_id.device_uuid.uuid
-                endpointId = kpiDescriptor.endpoint_id.endpoint_uuid.uuid
-                serviceId = kpiDescriptor.service_id.service_uuid.uuid
-                sliceId   = kpiDescriptor.slice_id.slice_uuid.uuid
-                connectionId = kpiDescriptor.connection_id.connection_uuid.uuid
-                time_stamp = request.timestamp.timestamp
-                kpi_value = getattr(request.kpi_value, request.kpi_value.WhichOneof('value'))
-
-                # Build the structure to be included as point in the MetricsDB
-                self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, sliceId, connectionId, kpi_value)
-
-            return Empty()
-        except ServiceException as e:
-            LOGGER.exception('IncludeKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(e.code, e.details)
-        except Exception:  # pragma: no cover
-            LOGGER.exception('IncludeKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-
-    def MonitorKpi(self, request: MonitorKpiRequest, grpc_context: grpc.ServicerContext) -> Empty:
-
-        LOGGER.info('MonitorKpi')
-        try:
-            kpi_id = int(request.kpi_id.kpi_id.uuid)
-            kpi = self.management_db.get_KPI(kpi_id)
-            response = Empty()
-
-            if kpi:
-                # Sets the request to send to the device service
-                monitor_device_request = MonitoringSettings()
-
-                kpiDescriptor = self.GetKpiDescriptor(request.kpi_id, grpc_context)
+                # Execute query per Kpi_id and introduce their kpi_list in the table
+                kpi_list = self.metrics_db.get_raw_kpi_list(kpi_id,monitoring_window_s,last_n_samples,start_timestamp,end_timestamp)
+                raw_kpi_list = RawKpiList()
+                raw_kpi_list.kpi_id.kpi_id.uuid = kpi_id
 
-                monitor_device_request.kpi_descriptor.CopyFrom(kpiDescriptor)
-                monitor_device_request.kpi_id.kpi_id.uuid = request.kpi_id.kpi_id.uuid
-                monitor_device_request.sampling_duration_s = request.monitoring_window_s
-                monitor_device_request.sampling_interval_s = request.sampling_rate_s
+                LOGGER.debug(str(kpi_list))
 
-                if not self.management_db.check_monitoring_flag(kpi_id):
-                    device_client = DeviceClient()
-                    device_client.MonitorDeviceKpi(monitor_device_request)
-                    self.management_db.set_monitoring_flag(kpi_id,True)
-                    self.management_db.check_monitoring_flag(kpi_id)
+                if kpi_list is None:
+                    LOGGER.info('QueryKpiData error: KpiID({:s}): points not found in metrics database'.format(str(kpi_id)))
                 else:
-                    LOGGER.warning('MonitorKpi warning: KpiID({:s}) is currently being monitored'.format(str(kpi_id)))
-            else:
-                LOGGER.info('MonitorKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
-            return response
-        except ServiceException as e:
-            LOGGER.exception('MonitorKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('MonitorKpi exception')
-            grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
-            # CREATEKPI_COUNTER_FAILED.inc()
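+                    # Each DB row is (timestamp, value); convert the timestamp
+                    # string into float seconds for the protobuf Timestamp field.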
+                    for item in kpi_list:
+                        raw_kpi = RawKpi()
+                        raw_kpi.timestamp.timestamp = timestamp_string_to_float(item[0])
+                        raw_kpi.kpi_value.floatVal  = item[1]
+                        raw_kpi_list.raw_kpis.append(raw_kpi)
 
-    def QueryKpiData(self, request: KpiQuery, grpc_context: grpc.ServicerContext) -> RawKpiTable:
+                raw_kpi_table.raw_kpi_lists.append(raw_kpi_list)
+        return raw_kpi_table
 
-        LOGGER.info('QueryKpiData')
-        try:
-            raw_kpi_table = RawKpiTable()
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetKpiSubscription(self, request: SubsDescriptor, grpc_context: grpc.ServicerContext) -> SubsResponse:
+        subs_queue = Queue()
+
+        kpi_id = request.kpi_id.kpi_id.uuid
+        sampling_duration_s = request.sampling_duration_s
+        sampling_interval_s = request.sampling_interval_s
+        start_timestamp = request.start_timestamp.timestamp
+        end_timestamp = request.end_timestamp.timestamp
+
+        subscriber = "localhost"  # Investigate how to get info from the requester
+
+        subs_id = self.management_db.insert_subscription(
+            kpi_id, subscriber, sampling_duration_s, sampling_interval_s, start_timestamp, end_timestamp)
+        self.subs_manager.create_subscription(
+            subs_queue, subs_id, kpi_id, sampling_interval_s, sampling_duration_s, start_timestamp, end_timestamp)
+
+        # Drain the queue filled by the SubscriptionManager and stream each batch of KPIs until end_timestamp
+        while True:
+            while not subs_queue.empty():
+                subs_response = SubsResponse()
+                kpi_items = subs_queue.get_nowait()  # renamed from 'list' to avoid shadowing the built-in
+                for item in kpi_items:
+                    kpi = Kpi()
+                    kpi.kpi_id.kpi_id.uuid = str(item[0])
+                    kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
+                    kpi.kpi_value.floatVal = item[2]  # This must be improved
+                    subs_response.kpi_list.kpi.append(kpi)
+                subs_response.subs_id.subs_id.uuid = str(subs_id)
+                yield subs_response
+            if timestamp_utcnow_to_float() > end_timestamp:
+                break
+        # yield subs_response
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetSubsDescriptor(self, request: SubscriptionID, grpc_context: grpc.ServicerContext) -> SubsDescriptor:
+        subs_id = request.subs_id.uuid
+        subs_db = self.management_db.get_subscription(int(subs_id))
+        response = SubsDescriptor()
+        if subs_db is None:
+            LOGGER.info('GetSubsDescriptor error: SubsID({:s}): not found in database'.format(str(subs_id)))
+        else:
+            LOGGER.debug(subs_db)
+            response.subs_id.subs_id.uuid = str(subs_db[0])
+            response.kpi_id.kpi_id.uuid = str(subs_db[1])
+            response.sampling_duration_s = subs_db[3]
+            response.sampling_interval_s = subs_db[4]
+            response.start_timestamp.timestamp = subs_db[5]
+            response.end_timestamp.timestamp = subs_db[6]
+        return response
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetSubscriptions(self, request: Empty, grpc_context: grpc.ServicerContext) -> SubsList:
+        response = SubsList()
+        data = self.management_db.get_subscriptions()
+        for subs_db in data:
+            subs_descriptor = SubsDescriptor()
+            subs_descriptor.subs_id.subs_id.uuid = str(subs_db[0])
+            subs_descriptor.kpi_id.kpi_id.uuid = str(subs_db[1])
+            subs_descriptor.sampling_duration_s = subs_db[3]
+            subs_descriptor.sampling_interval_s = subs_db[4]
+            subs_descriptor.start_timestamp.timestamp = subs_db[5]
+            subs_descriptor.end_timestamp.timestamp = subs_db[6]
+            response.subs_descriptor.append(subs_descriptor)
+        return response
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def DeleteSubscription(self, request: SubscriptionID, grpc_context: grpc.ServicerContext) -> Empty:
+        subs_id = int(request.subs_id.uuid)
+        subs_db = self.management_db.get_subscription(subs_id)
+        if subs_db:
+            self.management_db.delete_subscription(subs_id)
+        else:
+            LOGGER.info('DeleteSubscription error: SubsID({:s}): not found in database'.format(str(subs_id)))
+        return Empty()
 
-            LOGGER.debug(str(request))
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetKpiAlarm(self, request: AlarmDescriptor, grpc_context: grpc.ServicerContext) -> AlarmID:
+        response = AlarmID()
+        alarm_description = request.alarm_description
+        alarm_name = request.name
+        kpi_id = request.kpi_id.kpi_id.uuid
+        kpi_min_value = float(request.kpi_value_range.kpiMinValue.floatVal)
+        kpi_max_value = float(request.kpi_value_range.kpiMaxValue.floatVal)
+        in_range = request.kpi_value_range.inRange
+        include_min_value = request.kpi_value_range.includeMinValue
+        include_max_value = request.kpi_value_range.includeMaxValue
+        timestamp = request.timestamp.timestamp
+        LOGGER.debug(f"request.AlarmID: {request.alarm_id.alarm_id.uuid}")
+        if request.alarm_id.alarm_id.uuid != "":
+            alarm_id = request.alarm_id.alarm_id.uuid
+            # Here the code to modify an existing alarm
+        else:
+            alarm_id = self.management_db.insert_alarm(
+                alarm_description, alarm_name, kpi_id, kpi_min_value, kpi_max_value,
+                in_range, include_min_value, include_max_value)
+            LOGGER.debug(f"AlarmID: {alarm_id}")
+        response.alarm_id.uuid = str(alarm_id)
+        return response
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetAlarms(self, request: Empty, grpc_context: grpc.ServicerContext) -> AlarmList:
+        response = AlarmList()
+        data = self.management_db.get_alarms()
 
-            kpi_id_list             = request.kpi_ids
-            monitoring_window_s     = request.monitoring_window_s
-            last_n_samples          = request.last_n_samples
-            start_timestamp         = request.start_timestamp.timestamp
-            end_timestamp           = request.end_timestamp.timestamp
+        for alarm in data:
+            alarm_descriptor = AlarmDescriptor()
 
-            # Check if all the Kpi_ids exist
-            for item in kpi_id_list:
-                kpi_id = item.kpi_id.uuid
+            alarm_descriptor.alarm_id.alarm_id.uuid = str(alarm[0])
+            alarm_descriptor.alarm_description = alarm[1]
+            alarm_descriptor.name = alarm[2]
+            alarm_descriptor.kpi_id.kpi_id.uuid = str(alarm[3])
+            alarm_descriptor.kpi_value_range.kpiMinValue.floatVal = alarm[4]
+            alarm_descriptor.kpi_value_range.kpiMaxValue.floatVal = alarm[5]
+            alarm_descriptor.kpi_value_range.inRange = bool(alarm[6])
+            alarm_descriptor.kpi_value_range.includeMinValue = bool(alarm[7])
+            alarm_descriptor.kpi_value_range.includeMaxValue = bool(alarm[8])
 
-                kpiDescriptor = self.GetKpiDescriptor(item, grpc_context)
-                if kpiDescriptor is None:
-                    LOGGER.info('QueryKpiData error: KpiID({:s}): not found in database'.format(str(kpi_id)))
-                    break
-                else:
-                    # Execute query per Kpi_id and introduce their kpi_list in the table
-                    kpi_list = self.metrics_db.get_raw_kpi_list(kpi_id,monitoring_window_s,last_n_samples,start_timestamp,end_timestamp)
-                    raw_kpi_list = RawKpiList()
-                    raw_kpi_list.kpi_id.kpi_id.uuid = kpi_id
-
-                    LOGGER.debug(str(kpi_list))
-
-                    if kpi_list is None:
-                        LOGGER.info('QueryKpiData error: KpiID({:s}): points not found in metrics database'.format(str(kpi_id)))
-                    else:
-                        for item in kpi_list:
-                            raw_kpi = RawKpi()
-                            raw_kpi.timestamp.timestamp = timestamp_string_to_float(item[0])
-                            raw_kpi.kpi_value.floatVal  = item[1]
-                            raw_kpi_list.raw_kpis.append(raw_kpi)
-
-                    raw_kpi_table.raw_kpi_lists.append(raw_kpi_list)
-
-            return raw_kpi_table
-        except ServiceException as e:
-            LOGGER.exception('QueryKpiData exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('QueryKpiData exception')
+            response.alarm_descriptor.append(alarm_descriptor)
 
-    def SetKpiSubscription(self, request: SubsDescriptor, grpc_context: grpc.ServicerContext) -> SubsResponse:
+        return response
 
-        LOGGER.info('SubscribeKpi')
-        try:
-            subs_queue = Queue()
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetAlarmDescriptor(self, request: AlarmID, grpc_context: grpc.ServicerContext) -> AlarmDescriptor:
+        alarm_id = request.alarm_id.uuid
+        LOGGER.debug(alarm_id)
+        alarm = self.management_db.get_alarm(alarm_id)
+        response = AlarmDescriptor()
+
+        if alarm:
+            LOGGER.debug(f"{alarm}")
+            response.alarm_id.alarm_id.uuid = str(alarm_id)
+            response.alarm_description = alarm[1]
+            response.name = alarm[2]
+            response.kpi_id.kpi_id.uuid = str(alarm[3])
+            response.kpi_value_range.kpiMinValue.floatVal = alarm[4]
+            response.kpi_value_range.kpiMaxValue.floatVal = alarm[5]
+            response.kpi_value_range.inRange = bool(alarm[6])
+            response.kpi_value_range.includeMinValue = bool(alarm[7])
+            response.kpi_value_range.includeMaxValue = bool(alarm[8])
+        else:
+            LOGGER.info('GetAlarmDescriptor error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
+            response.alarm_id.alarm_id.uuid = "NoID"
+        return response
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetAlarmResponseStream(
+        self, request: AlarmSubscription, grpc_context: grpc.ServicerContext
+    ) -> Iterator[AlarmResponse]:
+        alarm_id = request.alarm_id.alarm_id.uuid
+        alarm_data = self.management_db.get_alarm(alarm_id)
+        real_start_time = timestamp_utcnow_to_float()
+
+        if alarm_data:
+            LOGGER.debug(f"{alarm_data}")
+            alarm_queue = Queue()
 
-            kpi_id = request.kpi_id.kpi_id.uuid
-            sampling_duration_s = request.sampling_duration_s
-            sampling_interval_s = request.sampling_interval_s
-            start_timestamp = request.start_timestamp.timestamp
-            end_timestamp = request.end_timestamp.timestamp
+            kpi_id = alarm_data[3]
+            kpiMinValue = alarm_data[4]
+            kpiMaxValue = alarm_data[5]
+            inRange = alarm_data[6]
+            includeMinValue = alarm_data[7]
+            includeMaxValue = alarm_data[8]
+            subscription_frequency_ms = request.subscription_frequency_ms
+            subscription_timeout_s = request.subscription_timeout_s
 
-            subscriber = "localhost"  # Investigate how to get info from the requester
+            end_timestamp = real_start_time + subscription_timeout_s
 
-            subs_id = self.management_db.insert_subscription(kpi_id, subscriber, sampling_duration_s,
-                                                             sampling_interval_s, start_timestamp, end_timestamp)
-            self.subs_manager.create_subscription(subs_queue, subs_id, kpi_id, sampling_interval_s, sampling_duration_s,
-                                                  start_timestamp, end_timestamp)
+            self.alarm_manager.create_alarm(alarm_queue, alarm_id, kpi_id, kpiMinValue, kpiMaxValue, inRange,
+                                            includeMinValue, includeMaxValue, subscription_frequency_ms,
+                                            subscription_timeout_s)
 
-            # parse queue to append kpis into the list
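+            # Drain the batches pushed into alarm_queue by the AlarmManager job
+            # and stream each one to the client until the subscription times out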
             while True:
-                while not subs_queue.empty():
-                    subs_response = SubsResponse()
-                    list = subs_queue.get_nowait()
+                while not alarm_queue.empty():
+                    alarm_response = AlarmResponse()
+                    list = alarm_queue.get_nowait()
                     for item in list:
                         kpi = Kpi()
                         kpi.kpi_id.kpi_id.uuid = str(item[0])
                         kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
                         kpi.kpi_value.floatVal = item[2]  # This must be improved
-                        subs_response.kpi_list.kpi.append(kpi)
-                    subs_response.subs_id.subs_id.uuid = str(subs_id)
-                    yield subs_response
+                        alarm_response.kpi_list.kpi.append(kpi)
+                    alarm_response.alarm_id.alarm_id.uuid = alarm_id
+                    yield alarm_response
                 if timestamp_utcnow_to_float() > end_timestamp:
                     break
-            # yield subs_response
-        except ServiceException as e:
-            LOGGER.exception('SubscribeKpi exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('SubscribeKpi exception')
-
-    def GetSubsDescriptor(self, request: SubscriptionID, grpc_context: grpc.ServicerContext) -> SubsDescriptor:
-
-        LOGGER.info('GetSubsDescriptor')
-        try:
-            subs_id = request.subs_id.uuid
-            subs_db = self.management_db.get_subscription(int(request.subs_id.uuid))
-            response = SubsDescriptor()
-            if subs_db is None:
-                LOGGER.info('GetSubsDescriptor error: SubsID({:s}): not found in database'.format(str(subs_id)))
-            else:
-                LOGGER.debug(subs_db)
-                response.subs_id.subs_id.uuid = str(subs_db[0])
-                response.kpi_id.kpi_id.uuid = str(subs_db[1])
-                response.sampling_duration_s = subs_db[3]
-                response.sampling_interval_s = subs_db[4]
-                response.start_timestamp.timestamp = subs_db[5]
-                response.end_timestamp.timestamp = subs_db[6]
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('GetSubsDescriptor exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetSubsDescriptor exception')
-
-    def GetSubscriptions(self, request: Empty, grpc_context: grpc.ServicerContext) -> SubsList:
-
-        LOGGER.info('GetSubscriptions')
-        try:
-            response = SubsList()
-            data = self.management_db.get_subscriptions()
-
-            for subs_db in data:
-                subs_descriptor = SubsDescriptor()
-
-                subs_descriptor.subs_id.subs_id.uuid = str(subs_db[0])
-                subs_descriptor.kpi_id.kpi_id.uuid = str(subs_db[1])
-                subs_descriptor.sampling_duration_s = subs_db[3]
-                subs_descriptor.sampling_interval_s = subs_db[4]
-                subs_descriptor.start_timestamp.timestamp = subs_db[5]
-                subs_descriptor.end_timestamp.timestamp = subs_db[6]
-
-                response.subs_descriptor.append(subs_descriptor)
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('GetSubscriptions exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetSubscriptions exception')
-
-    def DeleteSubscription(self, request: SubscriptionID, grpc_context: grpc.ServicerContext) -> Empty:
-
-        LOGGER.info('DeleteSubscription')
-        try:
-            LOGGER.debug(f'DeleteSubscription with SubsID: {request.subs_id.uuid}')
-            subs_id = int(request.subs_id.uuid)
-            subs_db = self.management_db.get_subscription(int(request.subs_id.uuid))
-            if subs_db:
-                self.management_db.delete_subscription(subs_id)
-            else:
-                LOGGER.info('DeleteSubscription error: SubsID({:s}): not found in database'.format(str(subs_id)))
-            return Empty()
-        except ServiceException as e:
-            LOGGER.exception('DeleteSubscription exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('DeleteSubscription exception')
-
-    def SetKpiAlarm(self, request: AlarmDescriptor, grpc_context: grpc.ServicerContext) -> AlarmResponse:
-
-        LOGGER.info('SetKpiAlarm')
-        try:
-            response = AlarmID()
-
-            alarm_description = request.alarm_description
-            alarm_name = request.name
-            kpi_id = request.kpi_id.kpi_id.uuid
-            kpi_min_value = request.kpi_value_range.kpiMinValue.floatVal
-            kpi_max_value = request.kpi_value_range.kpiMaxValue.floatVal
-            in_range = request.kpi_value_range.inRange
-            include_min_value = request.kpi_value_range.includeMinValue
-            include_max_value = request.kpi_value_range.includeMaxValue
-            timestamp = request.timestamp.timestamp
-
-            LOGGER.debug(f"request.AlarmID: {request.alarm_id.alarm_id.uuid}")
-
-            if request.alarm_id.alarm_id.uuid != "":
-                alarm_id = request.alarm_id.alarm_id.uuid
-            #     Here the code to modify an existing alarm
-            else:
-                alarm_id = self.management_db.insert_alarm(alarm_description, alarm_name, kpi_id, kpi_min_value,
-                                                           kpi_max_value,
-                                                           in_range, include_min_value, include_max_value)
-                LOGGER.debug(f"AlarmID: {alarm_id}")
-            response.alarm_id.uuid = str(alarm_id)
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('SetKpiAlarm exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('SetKpiAlarm exception')
-
-    def GetAlarms(self, request: Empty, grpc_context: grpc.ServicerContext) -> AlarmList:
-
-        LOGGER.info('GetAlarms')
-        try:
-            response = AlarmList()
-            data = self.management_db.get_alarms()
-
-            for alarm in data:
-                alarm_descriptor = AlarmDescriptor()
-
-                alarm_descriptor.alarm_id.alarm_id.uuid = str(alarm[0])
-                alarm_descriptor.alarm_description = alarm[1]
-                alarm_descriptor.name = alarm[2]
-                alarm_descriptor.kpi_id.kpi_id.uuid = str(alarm[3])
-                alarm_descriptor.kpi_value_range.kpiMinValue.floatVal = alarm[4]
-                alarm_descriptor.kpi_value_range.kpiMaxValue.floatVal = alarm[5]
-                alarm_descriptor.kpi_value_range.inRange = bool(alarm[6])
-                alarm_descriptor.kpi_value_range.includeMinValue = bool(alarm[7])
-                alarm_descriptor.kpi_value_range.includeMaxValue = bool(alarm[8])
-
-                response.alarm_descriptor.append(alarm_descriptor)
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('GetAlarms exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetAlarms exception')
-
-    def GetAlarmDescriptor(self, request: AlarmID, grpc_context: grpc.ServicerContext) -> AlarmDescriptor:
-
-        LOGGER.info('GetAlarmDescriptor')
-        try:
-            alarm_id = request.alarm_id.uuid
-            LOGGER.debug(alarm_id)
-            alarm = self.management_db.get_alarm(alarm_id)
-            response = AlarmDescriptor()
-
-            if alarm:
-                LOGGER.debug(f"{alarm}")
-                response.alarm_id.alarm_id.uuid = str(alarm_id)
-                response.alarm_description = alarm[1]
-                response.name = alarm[2]
-                response.kpi_id.kpi_id.uuid = str(alarm[3])
-                response.kpi_value_range.kpiMinValue.floatVal = alarm[4]
-                response.kpi_value_range.kpiMaxValue.floatVal = alarm[5]
-                response.kpi_value_range.inRange = bool(alarm[6])
-                response.kpi_value_range.includeMinValue = bool(alarm[7])
-                response.kpi_value_range.includeMaxValue = bool(alarm[8])
-            else:
-                LOGGER.info('GetAlarmDescriptor error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
-                response.alarm_id.alarm_id.uuid = "NoID"
-            return response
-        except ServiceException as e:
-            LOGGER.exception('GetAlarmDescriptor exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetAlarmDescriptor exception')
-
-    def GetAlarmResponseStream(self, request: AlarmSubscription, grpc_context: grpc.ServicerContext) -> Iterator[
-        AlarmResponse]:
-
-        LOGGER.info('GetAlarmResponseStream')
-        try:
-            alarm_id = request.alarm_id.alarm_id.uuid
-            alarm_data = self.management_db.get_alarm(alarm_id)
-            real_start_time = timestamp_utcnow_to_float()
-
-            if alarm_data:
-                LOGGER.debug(f"{alarm_data}")
-                alarm_queue = Queue()
-
-                alarm_id = request.alarm_id.alarm_id.uuid
-                kpi_id = alarm_data[3]
-                kpiMinValue = alarm_data[4]
-                kpiMaxValue = alarm_data[5]
-                inRange = alarm_data[6]
-                includeMinValue = alarm_data[7]
-                includeMaxValue = alarm_data[8]
-                subscription_frequency_ms = request.subscription_frequency_ms
-                subscription_timeout_s = request.subscription_timeout_s
-
-                end_timestamp = real_start_time + subscription_timeout_s
-
-                self.alarm_manager.create_alarm(alarm_queue, alarm_id, kpi_id, kpiMinValue, kpiMaxValue, inRange,
-                                                includeMinValue, includeMaxValue, subscription_frequency_ms,
-                                                subscription_timeout_s)
-
-                while True:
-                    while not alarm_queue.empty():
-                        alarm_response = AlarmResponse()
-                        list = alarm_queue.get_nowait()
-                        size = len(list)
-                        for item in list:
-                            kpi = Kpi()
-                            kpi.kpi_id.kpi_id.uuid = str(item[0])
-                            kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
-                            kpi.kpi_value.floatVal = item[2]  # This must be improved
-                            alarm_response.kpi_list.kpi.append(kpi)
-                        alarm_response.alarm_id.alarm_id.uuid = alarm_id
-                        yield alarm_response
-                    if timestamp_utcnow_to_float() > end_timestamp:
-                        break
-            else:
-                LOGGER.info('GetAlarmResponseStream error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
-                alarm_response = AlarmResponse()
-                alarm_response.alarm_id.alarm_id.uuid = "NoID"
-                return alarm_response
-        except ServiceException as e:
-            LOGGER.exception('GetAlarmResponseStream exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetAlarmResponseStream exception')
+        else:
+            LOGGER.info('GetAlarmResponseStream error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
+            alarm_response = AlarmResponse()
+            alarm_response.alarm_id.alarm_id.uuid = "NoID"
+            # 'yield' (not 'return') so the error response actually reaches the stream consumer
+            yield alarm_response
 
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def DeleteAlarm(self, request: AlarmID, grpc_context: grpc.ServicerContext) -> Empty:
+        alarm_id = int(request.alarm_id.uuid)
+        alarm = self.management_db.get_alarm(alarm_id)
+        response = Empty()
+        if alarm:
+            self.alarm_manager.delete_alarm(alarm_id)
+            self.management_db.delete_alarm(alarm_id)
+        else:
+            LOGGER.info('DeleteAlarm error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
+        return response
 
-        LOGGER.info('DeleteAlarm')
-        try:
-            LOGGER.debug(f'DeleteAlarm with AlarmID: {request.alarm_id.uuid}')
-            alarm_id = int(request.alarm_id.uuid)
-            alarm = self.management_db.get_alarm(alarm_id)
-            response = Empty()
-            if alarm:
-                self.alarm_manager.delete_alarm(alarm_id)
-                self.management_db.delete_alarm(alarm_id)
-            else:
-                LOGGER.info('DeleteAlarm error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
-            return response
-        except ServiceException as e:
-            LOGGER.exception('DeleteAlarm exception')
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('DeleteAlarm exception')
-
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetStreamKpi(self, request: KpiId, grpc_context: grpc.ServicerContext) -> Iterator[Kpi]:
-
-        LOGGER.info('GetStreamKpi')
-
         kpi_id = request.kpi_id.uuid
         kpi_db = self.management_db.get_KPI(int(kpi_id))
         response = Kpi()
@@ -581,36 +420,23 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         else:
             yield response
 
-    @MONITORING_GETINSTANTKPI_REQUEST_TIME.time()
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetInstantKpi(self, request: KpiId, grpc_context: grpc.ServicerContext) -> Kpi:
-
-        LOGGER.info('GetInstantKpi')
-        try:
-            kpi_id = request.kpi_id.uuid
-            response = Kpi()
-            if kpi_id == "":
-                LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
-                response.kpi_id.kpi_id.uuid = "NoID"
+        kpi_id = request.kpi_id.uuid
+        response = Kpi()
+        if kpi_id == "":
+            LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+            response.kpi_id.kpi_id.uuid = "NoID"
+        else:
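+            # QuestDB's LATEST ON clause returns only the most recent row per
+            # kpi_id partition, i.e. the latest sample for this KPI.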
+            query = f"SELECT kpi_id, timestamp, kpi_value FROM {METRICSDB_TABLE_MONITORING_KPIS} " \
+                    f"WHERE kpi_id = '{kpi_id}' LATEST ON timestamp PARTITION BY kpi_id"
+            data = self.metrics_db.run_query(query)
+            LOGGER.debug(data)
+            if len(data) == 0:
+                response.kpi_id.kpi_id.uuid = request.kpi_id.uuid
             else:
-                query = f"SELECT kpi_id, timestamp, kpi_value FROM {METRICSDB_TABLE_MONITORING_KPIS} " \
-                        f"WHERE kpi_id = '{kpi_id}' LATEST ON timestamp PARTITION BY kpi_id"
-                data = self.metrics_db.run_query(query)
-                LOGGER.debug(data)
-                if len(data) == 0:
-                    response.kpi_id.kpi_id.uuid = request.kpi_id.uuid
-                else:
-                    _data = data[0]
-                    response.kpi_id.kpi_id.uuid = str(_data[0])
-                    response.timestamp.timestamp = timestamp_string_to_float(_data[1])
-                    response.kpi_value.floatVal = _data[2]
-
-            return response
-        except ServiceException as e:
-            LOGGER.exception('GetInstantKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetInstantKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
-            grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
-
+                _data = data[0]
+                response.kpi_id.kpi_id.uuid = str(_data[0])
+                response.timestamp.timestamp = timestamp_string_to_float(_data[1])
+                response.kpi_value.floatVal = _data[2]
+        return response
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index c883f9d141fc28645761641b0ccd10294b538bd2..4e84431a5438e1536c92ca644bd5005deba545a4 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -25,7 +25,6 @@ from grpc._channel import _MultiThreadedRendezvous
 from common.Constants import ServiceNameEnum
 from common.Settings import (
     ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
-#from common.logger import getJSONLogger
 from common.proto.context_pb2 import DeviceOperationalStatusEnum, EventTypeEnum, DeviceEvent, Device, Empty
 from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
diff --git a/src/pathcomp/backend/Makefile b/src/pathcomp/backend/Makefile
index 058701098308620cd1a71f3718b934c63646d42a..56d249510497785316c2e0c36ea8ee0e28c461b1 100644
--- a/src/pathcomp/backend/Makefile
+++ b/src/pathcomp/backend/Makefile
@@ -30,16 +30,16 @@ coverage: CFLAGS  += -O0 -ggdb -g -DDEBUG -fprofile-arcs -ftest-coverage -DGCOV
 coverage: LDFLAGS += -g -lgcov --coverage -fprofile-arcs -ftest-coverage -DGCOV
 coverage: pathComp-cvr
 
-pathComp: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o 
-	gcc -o pathComp pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o \
+pathComp: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o 
+	gcc -o pathComp pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o \
 		-L/usr/lib/x86_64-linux-gnu/ -lglib-2.0 -luuid $(LDFLAGS) $(LDLIBS)
 
-pathComp-dbg: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o 
-	gcc -o pathComp-dbg pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o \
+pathComp-dbg: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o 
+	gcc -o pathComp-dbg pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o \
 		-L/usr/lib/x86_64-linux-gnu/ -lglib-2.0 -luuid $(LDFLAGS) $(LDLIBS)
 
-pathComp-cvr: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o 
-	gcc -o pathComp-cvr pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_RESTapi.o \
+pathComp-cvr: pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o 
+	gcc -o pathComp-cvr pathComp.o pathComp_log.o pathComp_cjson.o pathComp_tools.o pathComp_ksp.o pathComp_sp.o pathComp_ear.o pathComp_RESTapi.o \
 		-L/usr/lib/x86_64-linux-gnu/ -lglib-2.0 -luuid $(LDFLAGS) $(LDLIBS)
 
 pathComp_log.o: pathComp_log.h pathComp_log.c
@@ -56,8 +56,11 @@ pathComp_ksp.o: pathComp_log.h pathComp_tools.h pathComp_ksp.h pathComp_ksp.c
 
 pathComp_sp.o: pathComp_log.h pathComp_tools.h pathComp_sp.h pathComp_sp.c
 	$(CC) $(CFLAGS) -c pathComp_sp.c -o pathComp_sp.o
+
+pathComp_ear.o: pathComp_log.h pathComp_tools.h pathComp_ear.h pathComp_ear.c
+	$(CC) $(CFLAGS) -c pathComp_ear.c -o pathComp_ear.o
 	
-pathComp_RESTapi.o: pathComp_tools.h pathComp_log.h pathComp_cjson.h pathComp_ksp.h pathComp_sp.h pathComp_RESTapi.h pathComp_RESTapi.c
+pathComp_RESTapi.o: pathComp_tools.h pathComp_log.h pathComp_cjson.h pathComp_ksp.h pathComp_sp.h pathComp_ear.h pathComp_RESTapi.h pathComp_RESTapi.c
 	$(CC) $(CFLAGS) -c pathComp_RESTapi.c -o pathComp_RESTapi.o
 
 pathComp.o: pathComp_log.h pathComp_RESTapi.h pathComp.c pathComp.h
diff --git a/src/pathcomp/backend/pathComp.c b/src/pathcomp/backend/pathComp.c
index aa6c2b7341862a0115581abee7f977edabe93126..537cf378f1d6124ebc7c2a0140c0a408af547254 100644
--- a/src/pathcomp/backend/pathComp.c
+++ b/src/pathcomp/backend/pathComp.c
@@ -48,10 +48,8 @@ void my_gcov_handler(int signum)
 
 // External Variables
 FILE *logfile = NULL;
-
 // PATH COMP IP address API Client
 struct in_addr pathComp_IpAddr;
-
 // REST API ENABLED
 int RESTAPI_ENABLED = 0;
 
diff --git a/src/pathcomp/backend/pathComp_RESTapi.c b/src/pathcomp/backend/pathComp_RESTapi.c
index 82d4b38a840cf813dab850c4cb136ff05b503cbd..1780cfde2039b5907ab0f5696885e17deb56644c 100644
--- a/src/pathcomp/backend/pathComp_RESTapi.c
+++ b/src/pathcomp/backend/pathComp_RESTapi.c
@@ -36,6 +36,7 @@
 #include "pathComp_cjson.h"
 #include "pathComp_ksp.h"
 #include "pathComp_sp.h"
+#include "pathComp_ear.h"
 #include "pathComp_RESTapi.h"
 
 #define ISspace(x) isspace((int)(x))
@@ -50,9 +51,10 @@ guint CLIENT_ID = 0;
 guint32 paId_req = 0;
 
 // Global variables
-struct linkList_t* linkList;
-struct deviceList_t* deviceList;
-struct serviceList_t* serviceList;
+GList* linkList;
+GList* deviceList;
+GList* serviceList;
+GList* activeServList;
 
 gchar algId[MAX_ALG_ID_LENGTH];
 gboolean syncPath = FALSE;
@@ -78,8 +80,9 @@ gint find_rl_client_by_fd (gconstpointer data, gconstpointer userdata)
 	 struct pathComp_client *client = (struct pathComp_client*)data;
      gint fd = *(gint *)userdata; 
      
-    if (client->fd == fd)	
-		return 0;        
+	 if (client->fd == fd) {
+		 return 0;
+	 }
     return -1;	
 }
 
@@ -454,7 +457,6 @@ void rapi_response_json_contents (char *body, gint *length, struct compRouteOutp
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void rapi_response_ok (GIOChannel *source, gint httpCode, struct compRouteOutputList_t *compRouteOutputList) {
-    
 	gint ret = 0;
     
     //DEBUG_PC ("Creating the JSON Body and sending the response of the computed Route List");
@@ -526,8 +528,7 @@ void rapi_response_ok (GIOChannel *source, gint httpCode, struct compRouteOutput
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void rapi_response (GIOChannel *source, gint error)
-{
+void rapi_response (GIOChannel *source, gint error) {
 	 int ret = 0;	
 	 guchar buftmp[1024];
 	 char * buf = g_malloc0 (sizeof (char) * 2048000);
@@ -566,11 +567,94 @@ void rapi_response (GIOChannel *source, gint error)
 	 return;
 }
 
+///////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief Parse a topology Identifier JSON object (contains the Context Id and Topology UUID)
+ *
+ * 	@param obj
+ *  @param topology_id
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void parse_topology_Id(cJSON* obj, struct topology_id_t* topology_id) {
+	g_assert(topology_id);
+	// Get the context Id (UUID) from the topologyIdObj
+	cJSON* contextIdObj = cJSON_GetObjectItem(obj, "contextId");
+	if (cJSON_IsString(contextIdObj)) {
+		duplicate_string(topology_id->contextId, contextIdObj->valuestring);
+	}
+	// Get the topologyId (UUID) from the topologyIdObj
+	cJSON* topologyUuidObj = cJSON_GetObjectItem(obj, "topology_uuid");
+	if (cJSON_IsString(topologyUuidObj)) {
+		duplicate_string(topology_id->topology_uuid, topologyUuidObj->valuestring);
+	}
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief Parse an EndPointId JSON object
+ *
+ * 	@param item
+ *  @param serviceEndPointId
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void parse_endPointsIds(cJSON* item, struct service_endpoints_id_t* serviceEndPointId) {
+	// Get the topology Id Object
+	cJSON* topologyIdObj = cJSON_GetObjectItem(item, "topology_id");
+	if (cJSON_IsObject(topologyIdObj)) {
+		parse_topology_Id(topologyIdObj, &serviceEndPointId->topology_id);
+	}
+	// Get the deviceId (UUID)
+	cJSON* deviceIdObj = cJSON_GetObjectItem(item, "device_id");
+	if (cJSON_IsString(deviceIdObj)) {
+		duplicate_string(serviceEndPointId->device_uuid, deviceIdObj->valuestring);
+		DEBUG_PC("DeviceId: %s", serviceEndPointId->device_uuid);
+	}
+	// Get the endpointId (UUID)
+	cJSON* endPointIdObj = cJSON_GetObjectItem(item, "endpoint_uuid");
+	if (cJSON_IsString(endPointIdObj)) {
+		duplicate_string(serviceEndPointId->endpoint_uuid, endPointIdObj->valuestring);
+		DEBUG_PC("EndPointId: %s", serviceEndPointId->endpoint_uuid);
+	}
+	return;
+}
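+// Illustrative endPointId object handled above (hypothetical values):
+//   { "topology_id": { ... }, "device_id": "dev-uuid", "endpoint_uuid": "ep-uuid" }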
+
+///////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief Function used to parse the array of Endpoint Ids of the active services
+ *
+ * 	@param endPointArray
+ *  @param actServ
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void parse_act_service_endPointsIds_array(cJSON* endPointIdArray, struct activeService_t* actServ) {
+	g_assert(actServ);
+
+	for (gint i = 0; i < cJSON_GetArraySize(endPointIdArray); i++) {
+		actServ->num_service_endpoints_id++;
+		struct service_endpoints_id_t* serviceEndPointId = &(actServ->service_endpoints_id[i]);
+		cJSON* item = cJSON_GetArrayItem(endPointIdArray, i);
+		parse_endPointsIds(item, serviceEndPointId);
+	}
+	return;
+}
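+// Note: service_endpoints_id[] is a fixed-size array within activeService_t; the
+// loop above assumes the JSON array never exceeds that capacity.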
 
 ///////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_RESTapi.c
- * 	@brief Function used to parse the array of Endpoint Ids
+ * 	@brief Function used to parse the array of Endpoint Ids of the requested services
  *
  * 	@param endPointArray
  *  @param s
@@ -586,35 +670,7 @@ void parse_service_endPointsIds_array(cJSON* endPointIdArray, struct service_t*
 		struct service_endpoints_id_t* serviceEndPointId = &(s->service_endpoints_id[i]);
 
 		cJSON* item = cJSON_GetArrayItem(endPointIdArray, i);
-
-		// Get the topology Id Object
-		cJSON* topologyIdObj = cJSON_GetObjectItem(item, "topology_id");
-		if (cJSON_IsObject(topologyIdObj)) {
-			// Get the context Id (UUID) from the topologyIdObj
-			cJSON* contextIdObj = cJSON_GetObjectItem(topologyIdObj, "contextId");
-			if (cJSON_IsString(contextIdObj)) {					
-				duplicate_string(serviceEndPointId->topology_id.contextId, contextIdObj->valuestring);
-				//DEBUG_PC("Service EndPoint [%d]-- ContextId: %s (uuid string format)", i + 1, serviceEndPointId->topology_id.contextId);
-			}
-			// Get the topologyId (UUID) from the topologyIdObj
-			cJSON* topologyUuidObj = cJSON_GetObjectItem(topologyIdObj, "topology_uuid");
-			if (cJSON_IsString(topologyUuidObj)) {				
-				duplicate_string(serviceEndPointId->topology_id.topology_uuid, topologyUuidObj->valuestring);
-				//DEBUG_PC("Service Endpoint (%d) -- TopologyId: %s (uuid string format)", i + 1, serviceEndPointId->topology_id.topology_uuid);
-			}			
-		}
-		// Get the deviceId (UUID)
-		cJSON* deviceIdObj = cJSON_GetObjectItem(item, "device_id");
-		if (cJSON_IsString(deviceIdObj)) {			
-			duplicate_string(serviceEndPointId->device_uuid, deviceIdObj->valuestring);
-			DEBUG_PC("[%d] - DeviceId: %s", i + 1, serviceEndPointId->device_uuid);
-		}
-		// Get the endpointId (UUID)
-		cJSON* endPointIdObj = cJSON_GetObjectItem(item, "endpoint_uuid");
-		if (cJSON_IsString(endPointIdObj)) {
-			duplicate_string(serviceEndPointId->endpoint_uuid, endPointIdObj->valuestring);
-			DEBUG_PC("[%d] EndPointId: %s", i + 1, serviceEndPointId->endpoint_uuid);
-		}		
+		parse_endPointsIds(item, serviceEndPointId);
 	}
 	return;
 }
@@ -632,11 +688,8 @@ void parse_service_endPointsIds_array(cJSON* endPointIdArray, struct service_t*
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void parse_service_constraints(cJSON* constraintArray, struct service_t* s) {
-
 	for (gint i = 0; i < cJSON_GetArraySize(constraintArray); i++) {
-
 		s->num_service_constraints++;
-
 		struct constraint_t* constraint = &(s->constraints[i]);
 
 		cJSON* item = cJSON_GetArrayItem(constraintArray, i);
@@ -656,6 +709,38 @@ void parse_service_constraints(cJSON* constraintArray, struct service_t* s) {
 	return;
 }
 
+///////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief Function used to parse the serviceId information from a JSON obj
+ *
+ * 	@param obj
+ *  @param serviceId
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void parse_json_serviceId(cJSON* obj, struct serviceId_t* serviceId) {
+	g_assert(obj);
+	g_assert(serviceId);
+
+	// Get context Id uuid
+	cJSON* contextIdObj = cJSON_GetObjectItem(obj, "contextId");
+	if (cJSON_IsString(contextIdObj)) {
+		// convert the string in contextId->valuestring in uuid binary format
+		duplicate_string(serviceId->contextId, contextIdObj->valuestring);
+		DEBUG_PC("ContextId: %s (uuid string format)", serviceId->contextId);
+	}
+	// Get service Id uuid
+	cJSON* serviceUuidObj = cJSON_GetObjectItem(obj, "service_uuid");
+	if (cJSON_IsString(serviceUuidObj)) {
+		duplicate_string(serviceId->service_uuid, serviceUuidObj->valuestring);
+		DEBUG_PC("Service UUID: %s (uuid string format)", serviceId->service_uuid);
+	}
+	return;
+}
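+// Illustrative serviceId object parsed above (hypothetical values):
+//   "serviceId": { "contextId": "ctx-uuid", "service_uuid": "svc-uuid" }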
+
 ///////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_RESTapi.c
@@ -672,15 +757,15 @@ void parsing_json_serviceList_array(cJSON* serviceArray) {
 
 	for (gint i = 0; i < cJSON_GetArraySize(serviceArray); i++)
 	{
-		serviceList->numServiceList++;
-		struct service_t* service = &(serviceList->services[i]); 
-
+		struct service_t* service = g_malloc0(sizeof(struct service_t));
+		if (service == NULL) {
+			DEBUG_PC("Memory allocation error ...");
+			exit(-1);
+		}
 		cJSON* item = cJSON_GetArrayItem(serviceArray, i);
-		
 		// Get the algorithm Id
 		cJSON* algIdItem = cJSON_GetObjectItem(item, "algId");
-		if (cJSON_IsString(algIdItem))
-		{
+		if (cJSON_IsString(algIdItem)) {
 			duplicate_string(service->algId, algIdItem->valuestring);
 			DEBUG_PC ("algId: %s", service->algId);
 			// assumed that all the services request the same algId
@@ -689,16 +774,13 @@ void parsing_json_serviceList_array(cJSON* serviceArray) {
 
 		// Get the syncPaths
 		cJSON* synchPathObj = cJSON_GetObjectItemCaseSensitive(item, "syncPaths");
-		if (cJSON_IsBool(synchPathObj))
-		{
+		if (cJSON_IsBool(synchPathObj)) {
 			// Check Synchronization of multiple Paths to attain e.g. global concurrent optimization
-			if (cJSON_IsTrue(synchPathObj))
-			{
+			if (cJSON_IsTrue(synchPathObj)) {
 				syncPath = TRUE;
 				DEBUG_PC("Path Synchronization is required");
 			}
-			if (cJSON_IsFalse(synchPathObj))
-			{
+			if (cJSON_IsFalse(synchPathObj)) {
 				syncPath = FALSE;
 				DEBUG_PC("No Path Synchronization");
 			}
@@ -707,19 +789,7 @@ void parsing_json_serviceList_array(cJSON* serviceArray) {
 		// Get service Id in terms of contextId and service uuids
 		cJSON* serviceIdObj = cJSON_GetObjectItem(item, "serviceId");
 		if (cJSON_IsObject(serviceIdObj)) {
-			// Get context Id uuid
-			cJSON* contextIdObj = cJSON_GetObjectItem(serviceIdObj, "contextId");
-			if (cJSON_IsString(contextIdObj)) {
-				// convert the string in contextId->valuestring in uuid binary format
-				duplicate_string(service->serviceId.contextId, contextIdObj->valuestring);
-				DEBUG_PC("ContextId: %s (uuid string format)", service->serviceId.contextId);
-			}
-			// Get service Id uuid
-			cJSON* serviceUuidObj = cJSON_GetObjectItem(serviceIdObj, "service_uuid");
-			if (cJSON_IsString(serviceUuidObj)) {				
-				duplicate_string(service->serviceId.service_uuid, serviceUuidObj->valuestring);
-				DEBUG_PC("Service UUID: %s (uuid string format)", service->serviceId.service_uuid);
-			}				
+			parse_json_serviceId(serviceIdObj, &service->serviceId);
 		}		
 
 		// Get de service type
@@ -747,6 +817,9 @@ void parsing_json_serviceList_array(cJSON* serviceArray) {
 		if (cJSON_IsNumber(kPathsObj)){
 			service->kPaths = (guint)(kPathsObj->valuedouble);
 		}
+
+		// Append the requested service to the serviceList
+		serviceList = g_list_append(serviceList, service);
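+		// Ownership note: the list keeps the heap pointer; each element is later
+		// released via g_list_free_full(..., destroy_requested_service).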
 	}
 	return;
 }
@@ -764,7 +837,6 @@ void parsing_json_serviceList_array(cJSON* serviceArray) {
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void parse_capacity_object(cJSON* capacity, struct capacity_t* c) {
-
 	cJSON* totalSizeObj = cJSON_GetObjectItem(capacity, "total-size");
 	if (cJSON_IsObject(totalSizeObj)) {
 		//Get the capacity value
@@ -794,7 +866,6 @@ void parse_capacity_object(cJSON* capacity, struct capacity_t* c) {
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void parse_json_device_endpoints_array(cJSON* endPointsArray, struct device_t* d) {
-
 	for (gint i = 0; i < cJSON_GetArraySize(endPointsArray); i++) {
 		d->numEndPoints++;
 		struct endPoint_t* endpoint = &(d->endPoints[i]);
@@ -807,30 +878,17 @@ void parse_json_device_endpoints_array(cJSON* endPointsArray, struct device_t* d
 			// Get the topology Id Object
 			cJSON* topologyIdObj = cJSON_GetObjectItem(endPointIdObj, "topology_id");
 			if (cJSON_IsObject(topologyIdObj)) {
-				// Get the context Id (UUID) from the topologyIdObj
-				cJSON* contextIdObj = cJSON_GetObjectItem(topologyIdObj, "contextId");
-				if (cJSON_IsString(contextIdObj)) {
-					duplicate_string(endpoint->endPointId.topology_id.contextId, contextIdObj->valuestring);
-					//DEBUG_PC("Device EndPoint (%d)-- ContextId: %s (uuid string format)", i + 1, endpoint->endPointId.topology_id.contextId);
-				}
-				// Get the topologyId (UUID) from the topologyIdObj
-				cJSON* topologyUuidObj = cJSON_GetObjectItem(topologyIdObj, "topology_uuid");
-				if (cJSON_IsString(topologyUuidObj)) {					
-					duplicate_string(endpoint->endPointId.topology_id.topology_uuid, topologyUuidObj->valuestring);
-					//DEBUG_PC("Device Endpoint (%d) -- TopologyId: %s (uuid string format)", i + 1, endpoint->endPointId.topology_id.topology_uuid);
-				}
+				parse_topology_Id(topologyIdObj, &endpoint->endPointId.topology_id);
 			}
 			// Get the deviceId
 			cJSON* deviceIdObj = cJSON_GetObjectItem(endPointIdObj, "device_id");
 			if (cJSON_IsString(deviceIdObj)) {				
 				duplicate_string(endpoint->endPointId.device_id, deviceIdObj->valuestring);
-				//DEBUG_PC("Device Endpoint (%d) -- Device Id: %s (uuid)", i + 1, endpoint->endPointId.device_id);
 			}
 			// Get the endpoint_uuid
 			cJSON* endPointUuidObj = cJSON_GetObjectItem(endPointIdObj, "endpoint_uuid");
 			if (cJSON_IsString(endPointUuidObj)) {				
 				duplicate_string(endpoint->endPointId.endpoint_uuid, endPointUuidObj->valuestring);
-				//DEBUG_PC("Device Endpoint (%d) -- EndPoint Uuid: %s (uuid)", i + 1, endpoint->endPointId.endpoint_uuid);
 			}
 		}
 		// Get the EndPoint Type
@@ -889,6 +947,20 @@ void parse_json_device_endpoints_array(cJSON* endPointsArray, struct device_t* d
 				//DEBUG_PC("Inter-Domain Remote Id: %s", endpoint->inter_domain_plug_in.inter_domain_plug_in_remote_id);
 			}
 		}
+
+		// Energy consumption per endPoint port
+		cJSON* energyPortObj = cJSON_GetObjectItem(item, "energy_consumption");
+		if (cJSON_IsNumber(energyPortObj)) {
+			memcpy(&endpoint->energyConsumption, &energyPortObj->valuedouble, sizeof(gdouble));
+			DEBUG_PC("Endpoint Energy Consumption: %f", endpoint->energyConsumption);
+		}
+
+		// Endpoint Operational Status
+		cJSON* operationalStatusObj = cJSON_GetObjectItem(item, "operational_status");
+		if (cJSON_IsNumber(operationalStatusObj)) {
+			endpoint->operational_status = (gint)(operationalStatusObj->valuedouble);
+			DEBUG_PC("Endpoint Operational Status: %d", endpoint->operational_status);
+		}
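+		// Illustrative per-endpoint JSON fields read above (hypothetical values;
+		// operational_status is assumed to follow the device-level convention
+		// 0 Undefined, 1 Disabled, 2 Enabled):
+		//   "energy_consumption": 0.05, "operational_status": 2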
 	}
 	return;
 }
@@ -907,11 +979,28 @@ void parse_json_device_endpoints_array(cJSON* endPointsArray, struct device_t* d
 void parsing_json_deviceList_array(cJSON* deviceArray) {
 	DEBUG_PC("");
 	DEBUG_PC("========= PARSING DEVICE LIST ============");
-	for (gint i = 0; i < cJSON_GetArraySize(deviceArray); i++) {
-		deviceList->numDevices++;
-		struct device_t* d = &(deviceList->devices[i]);
+	for (gint i = 0; i < cJSON_GetArraySize(deviceArray); i++) {		
+		struct device_t* d = g_malloc0(sizeof(struct device_t));
+		if (d == NULL) {
+			DEBUG_PC("Memory Allocation Failure");
+			exit(-1);
+		}
 		cJSON* item = cJSON_GetArrayItem(deviceArray, i);
 
+		// Get the power idle of the switch
+		cJSON* powerIdleObj = cJSON_GetObjectItem(item, "power_idle");
+		if (cJSON_IsNumber(powerIdleObj)) {
+			memcpy(&d->power_idle, &powerIdleObj->valuedouble, sizeof(gdouble));
+			DEBUG_PC("Power Idle: %f", d->power_idle);
+		}
+
+		// Get the operational state
+		cJSON* opeStatusObj = cJSON_GetObjectItem(item, "operational_status");
+		if (cJSON_IsNumber(opeStatusObj)) {
+			d->operational_status = (gint)(opeStatusObj->valuedouble);
+			DEBUG_PC("Operational Status: %d (0 Undefined, 1 Disabled, 2 Enabled", d->operational_status);
+		}
+
 		// Get the device UUID
 		cJSON* deviceUuidObj = cJSON_GetObjectItem(item, "device_Id");
 		if (cJSON_IsString(deviceUuidObj)) {
@@ -932,6 +1021,8 @@ void parsing_json_deviceList_array(cJSON* deviceArray) {
 		if (cJSON_IsArray(deviceEndpointsArray)) {
 			parse_json_device_endpoints_array(deviceEndpointsArray, d);
 		}
+		// append the device into the deviceList
+		deviceList = g_list_append(deviceList, d);
 	}
 	return;
 }
@@ -949,7 +1040,6 @@ void parsing_json_deviceList_array(cJSON* deviceArray) {
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void parse_json_link_endpoints_array(cJSON *endPointsLinkObj, struct link_t* l) {
-
 	for (gint i = 0; i < cJSON_GetArraySize(endPointsLinkObj); i++) {
 		//DEBUG_PC("link: %s has %d endPointIds", l->linkId, l->numLinkEndPointIds);
 		l->numLinkEndPointIds++;
@@ -963,18 +1053,7 @@ void parse_json_link_endpoints_array(cJSON *endPointsLinkObj, struct link_t* l)
 			// Get the topology Id Object
 			cJSON* topologyIdObj = cJSON_GetObjectItem(endPointIdObj, "topology_id");
 			if (cJSON_IsObject(topologyIdObj)) {
-				// Get the context Id (UUID) from the topologyIdObj
-				cJSON* contextIdObj = cJSON_GetObjectItem(topologyIdObj, "contextId");
-				if (cJSON_IsString(contextIdObj)) {					
-					duplicate_string(endPointLink->topology_id.contextId, contextIdObj->valuestring);
-					//DEBUG_PC("Link EndPoint (%d)-- ContextId: %s (uuid string format)", i + 1, endPointLink->topology_id.contextId);
-				}
-				// Get the topologyId (UUID) from the topologyIdObj
-				cJSON* topologyUuidObj = cJSON_GetObjectItem(topologyIdObj, "topology_uuid");
-				if (cJSON_IsString(topologyUuidObj)) {
-					duplicate_string(endPointLink->topology_id.topology_uuid, topologyUuidObj->valuestring);
-					//DEBUG_PC("Link Endpoint (%d) -- TopologyId: %s (uuid string format)", i + 1, endPointLink->topology_id.topology_uuid);
-				}
+				parse_topology_Id(topologyIdObj, &endPointLink->topology_id);				
 			}
 			// Get the deviceId
 			cJSON* deviceIdObj = cJSON_GetObjectItem(endPointIdObj, "device_id");
@@ -1006,21 +1085,23 @@ void parse_json_link_endpoints_array(cJSON *endPointsLinkObj, struct link_t* l)
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void parsing_json_linkList_array(cJSON* linkListArray) {
-
 	DEBUG_PC("");
 	DEBUG_PC("======= PARSING OF THE LINK LIST ARRAY ==========");
-	for (gint i = 0; i < cJSON_GetArraySize(linkListArray); i++) {
-		linkList->numLinks++;
-		struct link_t* l = &(linkList->links[i]);
-		//l->numLinkEndPointIds = 0;
-
+	for (gint i = 0; i < cJSON_GetArraySize(linkListArray); i++) {		
+		struct link_t* l = g_malloc0(sizeof(struct link_t));
+		if (l == NULL) {
+			DEBUG_PC("Memory Allocation Failure");
+			exit(-1);
+		}
 		cJSON* item = cJSON_GetArrayItem(linkListArray, i);
+
 		// Get the link Id (uuid)
 		cJSON* linkIdObj = cJSON_GetObjectItem(item, "link_Id");
 		if (cJSON_IsString(linkIdObj)) {
 			duplicate_string(l->linkId, linkIdObj->valuestring);
 			DEBUG_PC(" * Link (%d) -- Id: %s (uuid)", i + 1, l->linkId);
 		}
+
 		// Get the link endpoints (assumed to be p2p)
 		cJSON* endPointsLinkObj = cJSON_GetObjectItem(item, "link_endpoint_ids");
 		if (cJSON_IsArray(endPointsLinkObj)) {
@@ -1083,6 +1164,7 @@ void parsing_json_linkList_array(cJSON* linkListArray) {
 				//DEBUG_PC("Link (%d) -- Latency: %f", i + 1, l->latency_characteristics.fixed_latency);
 			}	
 		}
+		linkList = g_list_append(linkList, l);
 	}
 	return;
 }
@@ -1100,15 +1182,22 @@ void parsing_json_linkList_array(cJSON* linkListArray) {
 void generate_reverse_linkList() {
 	DEBUG_PC("");
 	DEBUG_PC("CREATION OF REVERSE LINKS");
-	gint numLinks = linkList->numLinks;
-	
-	for (gint i = 0; i < numLinks; i++) {
-		struct link_t* refLink = &(linkList->links[i]);
-		struct link_t* newLink = &(linkList->links[numLinks + i]);
-		linkList->numLinks++;
+	gint numLinks = g_list_length (linkList);
+	DEBUG_PC("Initial Number of links in the main List: %d", numLinks);
+	gint i = 0;
+	for (GList* ln = g_list_first(linkList);
+		(ln) && (i < numLinks);
+		ln = g_list_next(ln), i++)
+	{
+		struct link_t* refLink = (struct link_t*)(ln->data);
+		struct link_t* newLink = g_malloc0(sizeof(struct link_t));
+		if (newLink == NULL) {
+			DEBUG_PC("Memory Allocation Failure");
+			exit(-1);
+		}
 		// Copy the linkId + appending "_rev"
 		duplicate_string(newLink->linkId, refLink->linkId);
-		strcat(newLink->linkId, "_rev");
+		strcat(newLink->linkId, "_rev");
 
 		//DEBUG_PC("refLink: %s // newLink: %s", refLink->linkId, newLink->linkId);
 
@@ -1121,7 +1210,7 @@ void generate_reverse_linkList() {
 			exit(-1);
 		}
 #endif
-		DEBUG_PC(" * Link[%d] -- Id: %s", numLinks + i, newLink->linkId);
+		//DEBUG_PC(" * Link[%d] -- Id: %s", numLinks + i, newLink->linkId);
 
 		//DEBUG_PC("Number of Endpoints in Link: %d", refLink->numLinkEndPointIds);
 		for (gint j = refLink->numLinkEndPointIds - 1, m = 0; j >= 0; j--, m++) {			
@@ -1131,9 +1220,9 @@ void generate_reverse_linkList() {
 			duplicate_string(newEndPId->topology_id.contextId, refEndPId->topology_id.contextId);
 			duplicate_string(newEndPId->topology_id.topology_uuid, refEndPId->topology_id.topology_uuid);
 			//duplicate the deviceId and endPoint_uuid
-			duplicate_string(newEndPId->deviceId, refEndPId->deviceId);
-			duplicate_string(newEndPId->endPointId, refEndPId->endPointId);
-			DEBUG_PC("refLink Endpoint[%d]: %s(%s)", j, refEndPId->deviceId, refEndPId->endPointId);
+			duplicate_string(newEndPId->deviceId, refEndPId->deviceId);
+			duplicate_string(newEndPId->endPointId, refEndPId->endPointId);
+			//DEBUG_PC("refLink Endpoint[%d]: %s(%s)", j, refEndPId->deviceId, refEndPId->endPointId);
 			//DEBUG_PC("newLink Endpoint[%d]: %s(%s)", m, newEndPId->deviceId, newEndPId->endPointId);
 			newLink->numLinkEndPointIds++;
 		}
@@ -1155,11 +1244,87 @@ void generate_reverse_linkList() {
 
 		// duplicate latency characteristics
 		memcpy(&newLink->latency_characteristics.fixed_latency, &refLink->latency_characteristics.fixed_latency, sizeof(gdouble));
+		// Append the newly created reverse link to the linkList
+		linkList = g_list_append(linkList, newLink);
 	}
-	DEBUG_PC("Terminating Reverse Links [total: %d]", linkList->numLinks);
+	DEBUG_PC("Terminating Reverse Links [total links: %d]", g_list_length(linkList));
 	return;
 }
 
+///////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief Function used to parse the JSON object/s for active services
+ *
+ * 	@param actServiceArray
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void parsing_json_activeService_array(cJSON* actServiceArray) {
+	DEBUG_PC("");
+	DEBUG_PC("====== PARSING THE JSON CONTENTS OF THE ACTIVE SERVICES =======");
+	
+	for (gint i = 0; i < cJSON_GetArraySize(actServiceArray); i++) {
+		struct activeService_t* actServ = g_malloc0(sizeof(struct activeService_t));
+		if (actServ == NULL) {
+			DEBUG_PC("Memory Allocation Failure");
+			exit(-1);
+		}
+		cJSON* item = cJSON_GetArrayItem(actServiceArray, i);
+		// ServiceId
+		cJSON* serviceIdObj = cJSON_GetObjectItem(item, "serviceId");
+		if (cJSON_IsObject(serviceIdObj)) {
+			parse_json_serviceId(serviceIdObj, &actServ->serviceId);
+		}
+		// Service Type
+		cJSON* serviceTypeObj = cJSON_GetObjectItem(item, "serviceType");
+		if (cJSON_IsNumber(serviceTypeObj)) {
+			actServ->service_type = (guint)(serviceTypeObj->valuedouble);
+			print_service_type(actServ->service_type);
+		}
+		// Service Endpoints
+		cJSON* endPointIdsArray = cJSON_GetObjectItem(item, "service_endpoints_ids");
+		if (cJSON_IsArray(endPointIdsArray)) {
+			parse_act_service_endPointsIds_array(endPointIdsArray, actServ);
+		}
+		// Parsing the active service path
+		actServ->activeServPath = NULL;
+		cJSON* actServPathArray = cJSON_GetObjectItem(item, "devices");
+		if (cJSON_IsArray(actServPathArray)) {
+			for (gint j = 0; j < cJSON_GetArraySize(actServPathArray); j++) {
+				struct activeServPath_t* actServPath = g_malloc0(sizeof(struct activeServPath_t));
+				if (actServPath == NULL) {
+					DEBUG_PC("Memory Allocation Failure");
+					exit(-1);
+				}
+				cJSON* item2 = cJSON_GetArrayItem(actServPathArray, j);
+				// Topology Id
+				cJSON* topologyIdObj = cJSON_GetObjectItem(item2, "topology_id");
+				if (cJSON_IsObject(topologyIdObj)) {
+					parse_topology_Id(topologyIdObj, &actServPath->topology_id);
+				}
+				// Device Id
+				cJSON* deviceIdObj = cJSON_GetObjectItem(item2, "device_id");
+				if (cJSON_IsString(deviceIdObj)) {
+					duplicate_string(actServPath->deviceId, deviceIdObj->valuestring);
+				}
+				// EndPointId
+				cJSON* endPointUUIDObj = cJSON_GetObjectItem(item2, "endpoint_uuid");
+				if (cJSON_IsString(endPointUUIDObj)) {
+					duplicate_string(actServPath->endPointId, endPointUUIDObj->valuestring);
+				}
+				// Append the element (i.e., topologyId, deviceId and endpointId) to the Active Service Path
+				actServ->activeServPath = g_list_append(actServ->activeServPath, actServPath);
+			}
+		}
+		// append into the Active Service List
+		activeServList = g_list_append(activeServList, actServ);
+	}
+	return;
+}
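+// Illustrative active-service entry handled above (key names follow the parser;
+// all values are hypothetical):
+//   {
+//     "serviceId": { "contextId": "ctx-uuid", "service_uuid": "svc-uuid" },
+//     "serviceType": 1,
+//     "service_endpoints_ids": [ { "device_id": "dev-A", "endpoint_uuid": "ep-1" } ],
+//     "devices": [ { "topology_id": { ... }, "device_id": "dev-A", "endpoint_uuid": "ep-1" } ]
+//   }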
 
 ///////////////////////////////////////////////////////////////////////////////////////
 /**
@@ -1176,22 +1341,6 @@ void generate_reverse_linkList() {
 /////////////////////////////////////////////////////////////////////////////////////////
 void parsing_json_obj_pathComp_request(cJSON * root, GIOChannel * source)
 {
-	//DEBUG_PC("**");
-	if (deviceList == NULL){	
-	  	DEBUG_PC ("Device List does not exist ... STOP");
-	  	exit(-1);
-	}
-
-	if (linkList == NULL) {
-		DEBUG_PC("Link List does not exist ... STOP")
-	}
-	
-	if (serviceList == NULL)
-	{
-		DEBUG_PC ("Service List does not exist ... STOP");
-		exit(-1);       
-	} 
-
 	// Set of services to seek their path and resource selection
 	cJSON* serviceListArray = cJSON_GetObjectItem(root, "serviceList");
 	if (cJSON_IsArray(serviceListArray)) {
@@ -1211,10 +1360,15 @@ void parsing_json_obj_pathComp_request(cJSON * root, GIOChannel * source)
 
 		// In the context information, if solely the list of links are passed for a single direction, 
 		// the reverse direction MUST be created sythetically 
-		
 		// LGR: deactivated; link duplication needs to be done smartly with TAPI. done manually in topology by now
 		//generate_reverse_linkList();
 	}
+
+	// Get the list of active services
+	cJSON* actServiceArray = cJSON_GetObjectItem(root, "activeServList");
+	if (cJSON_IsArray(actServiceArray)) {
+		parsing_json_activeService_array(actServiceArray);
+	}
 	return;
 }
 
@@ -1295,19 +1449,16 @@ struct pathComp_client * RESTapi_client_create (GIOChannel * channel_client, gin
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void RESTapi_client_close (struct pathComp_client* client)
-{
+void RESTapi_client_close (struct pathComp_client* client) {
 	//DEBUG_PC("Closing the client (Id: %d) %p", client->type, client);
 	//DEBUG_PC("Client ibuf: %p || obuf: %p", client->ibuf, client->obuf);
 	
-	if (client->ibuf != NULL)
-	{
+	if (client->ibuf != NULL) {
 		//DEBUG_PC("Client ibuf: %p", client->ibuf);
 		stream_free(client->ibuf);
 		client->ibuf = NULL;
 	}
-	if (client->obuf != NULL)
-	{
+	if (client->obuf != NULL) {
 		//DEBUG_PC("Client obuf: %p", client->obuf);
 		stream_free(client->obuf);
 		client->obuf = NULL;
@@ -1333,16 +1484,14 @@ void RESTapi_client_close (struct pathComp_client* client)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void RESTapi_close_operations (GIOChannel * source)
-{
+void RESTapi_close_operations (GIOChannel * source)	{
 	gint fd = g_io_channel_unix_get_fd (source);
 	
 	//DEBUG_PC ("Stop all the operations over the fd: %d", fd);	
 	g_io_channel_flush(source, NULL);
 	GError *error = NULL;    
 	g_io_channel_shutdown (source, TRUE, &error);
-	if(error)
-	{
+	if(error) {
 		DEBUG_PC ("An error occurred ...");
 	}
 	g_io_channel_unref (source);
@@ -1362,8 +1511,7 @@ void RESTapi_close_operations (GIOChannel * source)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void RESTapi_stop (struct pathComp_client* client, GIOChannel * source, gint fd)
-{
+void RESTapi_stop (struct pathComp_client* client, GIOChannel * source, gint fd) {
 	
 	DEBUG_PC("Client Socket: %d is Stopped", fd);
 	// remove client
@@ -1387,38 +1535,31 @@ void RESTapi_stop (struct pathComp_client* client, GIOChannel * source, gint fd)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gint RESTapi_get_line (GIOChannel *channel, gchar *buf, gint size)
-{
+gint RESTapi_get_line (GIOChannel *channel, gchar *buf, gint size) {
     gint i = 0;
     //DEBUG_PC ("\n");
     //DEBUG_PC ("----- Read REST API Line(\r\n) ------");
     gint n = 0;
     guchar c = '\0'; // END OF FILE    
     gboolean cr = FALSE;
-    while (i < size - 1)
-    {
+    while (i < size - 1) {
 		n = read_channel (channel, &c, 1);		
-		if (n == -1)
-		{
+		if (n == -1) {
 			//DEBUG_PC ("Close the channel and eliminate the client");
 			return -1;			
 		}	
-		if (n > 0)
-		{
+		if (n > 0) {
 			//DEBUG_PC ("%c", c);
 			buf[i] = c;
 			i++;	
-			if (c == '\r')
-			{
+			if (c == '\r') {
 				cr = TRUE;	      
 			}	  
-			if ((c == '\n') && (cr == TRUE))
-			{	   
+			if ((c == '\n') && (cr == TRUE)) {
 				break;
 			}	        
 		} 
-		else
-		{
+		else {
 			c = '\n';
 			buf[i] = c;
 			i++;
@@ -1447,8 +1588,7 @@ guint RESTapi_get_method (gchar *buf, gint *j)
 	guint RestApiMethod = 0;
 	gchar method[255];
 	gint i = 0;	
-	while (!ISspace(buf[*j]) && (i < sizeof(method) - 1))
-	{
+	while (!ISspace(buf[*j]) && (i < sizeof(method) - 1)) {
 		method[i] = buf[*j];
 		i++; 
 		*j = *j + 1;
@@ -1458,32 +1598,60 @@ guint RESTapi_get_method (gchar *buf, gint *j)
 	
 	// Check that the methods are GET, POST or PUT
 	if (strcasecmp((const char *)method, "GET") && strcasecmp((const char *)method, "POST") && 
-		strcasecmp ((const char *)method, "HTTP/1.1") && strcasecmp ((const char *)method, "PUT"))
-	{
-		DEBUG_PC ("The method: %s is not currently supported ...", method);
+		strcasecmp ((const char *)method, "HTTP/1.1") && strcasecmp ((const char *)method, "PUT")) {
+		DEBUG_PC ("%s is not a method ...", method);
 		return RestApiMethod;	
 	}
 	// Method selector
-	if (strncmp ((const char*)method, "GET", 3) == 0)
-	{
+	if (strncmp ((const char*)method, "GET", 3) == 0) {
 		RestApiMethod = REST_API_METHOD_GET;		
 	}
-	else if (strncmp ((const char*)method, "POST", 4) == 0)
-	{
+	else if (strncmp ((const char*)method, "POST", 4) == 0) {
 		RestApiMethod = REST_API_METHOD_POST;
 	}	
-	else if (strncmp ((const char *)method, "HTTP/1.1", 8) == 0)
-	{
+	else if (strncmp ((const char *)method, "HTTP/1.1", 8) == 0) {
 		RestApiMethod = REST_API_METHOD_HTTP;
 	}
-	else if (strncmp ((const char *)method, "PUT", 3) == 0)
-	{
+	else if (strncmp ((const char *)method, "PUT", 3) == 0) {
 		RestApiMethod = REST_API_METHOD_PUT;
-	}
-	
+	}	
 	return RestApiMethod;	
 }
 
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_RESTapi.c
+ * 	@brief Function used to check whether a token is a supported REST method, returning the associated numerical id
+ *
+ * 	@param method
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+guint is_rest_api_method(char *method) {
+	guint RestApiMethod = 0;
+	if (strcasecmp((const char*)method, "GET") && strcasecmp((const char*)method, "POST") &&
+		strcasecmp((const char*)method, "HTTP/1.1") && strcasecmp((const char*)method, "PUT")) {
+		DEBUG_PC("The method: %s is not currently supported ...", method);
+		return RestApiMethod;
+	}
+	// Method selector
+	if (strncmp((const char*)method, "GET", 3) == 0) {
+		RestApiMethod = REST_API_METHOD_GET;
+	}
+	else if (strncmp((const char*)method, "POST", 4) == 0) {
+		RestApiMethod = REST_API_METHOD_POST;
+	}
+	else if (strncmp((const char*)method, "HTTP/1.1", 8) == 0) {
+		RestApiMethod = REST_API_METHOD_HTTP;
+	}
+	else if (strncmp((const char*)method, "PUT", 3) == 0) {
+		RestApiMethod = REST_API_METHOD_PUT;
+	}
+	return RestApiMethod;
+}
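+// Minimal usage sketch (assumption: fed with each whitespace-delimited token of
+// the HTTP request line, as done in RESTapi_activity() below):
+//   guint m = is_rest_api_method("POST");   // -> REST_API_METHOD_POST
+//   guint u = is_rest_api_method("Host:");  // -> 0, i.e., not a supported method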
+
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_RESTapi.c
@@ -1537,8 +1705,7 @@ gint get_url (gchar *buf, gint *j, gchar *url)
 /////////////////////////////////////////////////////////////////////////////////////////
 gint get_version (gchar *buf, gint *j, gchar *version) {
 	// Skip space char
-	while (ISspace(buf[*j]) && (*j < strlen(buf)))
-	{
+	while (ISspace(buf[*j]) && (*j < strlen(buf))) {
 		*j = *j + 1;
 	}	
 	//DEBUG_PC ("buf[%d]: %c", *j, buf[*j]);
@@ -1578,8 +1745,7 @@ gint triggering_routeComp (struct compRouteOutputList_t *compRouteList, gchar *a
 	DEBUG_PC("Requested Algorithm: %s", algId);
 	//////////////////// Algorithm Selector (RAId)//////////////////////////////////////	
 	// KSP algorithm
-	if (strncmp ((const char*)algId, "KSP", 3) == 0)
-	{
+	if (strncmp ((const char*)algId, "KSP", 3) == 0) {
 		DEBUG_PC ("Alg Id: KSP");
 		httpCode = pathComp_ksp_alg(compRouteList);
 	}
@@ -1588,20 +1754,11 @@ gint triggering_routeComp (struct compRouteOutputList_t *compRouteList, gchar *a
 		DEBUG_PC("Alg Id: SP");
 		httpCode = pathComp_sp_alg(compRouteList);
 	}
-#if 0
-	// Infrastructure Abstraction (InA)
-	else if (strncmp ((const char*)raId, "InA", 3) == 0) 
-	{
-		//DEBUG_PC ("RA: InA");
-		httpCode = ra_InA_alg (compRouteList);
+	// energy-aware routing
+	else if (strncmp((const char*)algId, "EAR", 3) == 0) {
+		DEBUG_PC("Alg Id: Energy Aware Routing, EAR");
+		httpCode = pathComp_ear_alg(compRouteList);
 	}
-	// Global Concurrent Optimization (GCO): Resoration / Re-Allocation / Re-Optimization
-	else if (strncmp ((const char*)raId, "GCO", 3) == 0)
-	{
-		//DEBUG_PC ("RA: GCO");
-		httpCode = ra_GCO_alg (compRouteList);	
-	}
-#endif
 	return httpCode;
 }
 
@@ -1618,8 +1775,7 @@ gint triggering_routeComp (struct compRouteOutputList_t *compRouteList, gchar *a
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
-{  
+gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data) {  
 	/** some checks */
 	g_assert(source != NULL);
 	g_assert(data != NULL);	
@@ -1637,24 +1793,21 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
 	gint fd = g_io_channel_unix_get_fd (source);
 	DEBUG_PC ("fd: %d, cond: %d", fd, cond);
 
-	if (cond != G_IO_IN)
-	{
+	if (cond != G_IO_IN) {
 		DEBUG_PC ("Something happening with the channel and fd ... (cond: %d)", cond);
 		RESTapi_stop(client, source, fd);
 		return FALSE;
 	}	
-	/** Clear input buffer. */
+	// Clear input buffer
 	stream_reset (client->ibuf);
 
 	// get line
 	gint nbytes = RESTapi_get_line (source, buf, sizeof (buf));
-	if (nbytes == -1)
-	{
+	if (nbytes == -1) {
 		DEBUG_PC ("nbytes -1 ... CLOSE CLIENT FD and eliminate CLIENT");						
 		RESTapi_stop(client, source, fd);
 		return FALSE;						
-	}		
-	
+	}	
 	if ((buf[0] == '\n') && (nbytes  == 1))
 	{
 		//DEBUG_PC (" -- buf[0] = newline --");
@@ -1663,95 +1816,90 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
 	}
 	
 	gint i = 0, j = 0;
-	// Get the REST Method
-	guint RestApiMethod = RESTapi_get_method (buf, &j);
-	if (RestApiMethod == 0) 	{
-		DEBUG_PC ("The method is NOT supported ...");
-		RESTapi_unimplemented (source);
-		RESTapi_stop(client, source, fd);
-		return FALSE;
-	}
 
-	// get the REST url
-	gchar url[255];
-	i = get_url (buf, &j, url);	
-	url[i] = '\0';	
-
-	// GET - used for checking status of pathComp ... used url /pathComp/api/v1/health
-	if (RestApiMethod == REST_API_METHOD_GET) {
-		if (strncmp((const char*)url, "/health", 7) != 0) {
-			DEBUG_PC("unknown url [%s] for GET method -- Heatlh function", url);
-			RESTapi_stop(client, source, fd);
-			exit(-1);
+	while (1) {
+		DEBUG_PC("%s", buf);
+		char word[255];		
+		while (!ISspace(buf[j]) && (i < sizeof(word) - 1)) {
+			word[i] = buf[j]; i++; j++;
 		}
-		else {
-			DEBUG_PC("Sending API Response OK to health requests");
-			rapi_response_ok(source, HTTP_RETURN_CODE_OK, NULL);
-			return TRUE;
+		word[i] = '\0';
+		// Check if word is bound to a Method, i.e., POST, GET, HTTP/1.1.
+		guint method = is_rest_api_method(word);
+		if (method == 0) {
+			// ignore other REST header fields, e.g., Host:, User-Agent:, Accept: ...
+			break;
 		}
-	}
-
-	// for method POST, PUT check that the url is "/pathComp"
-	if (RestApiMethod == REST_API_METHOD_POST) {
-		if (strncmp((const char*)url, "/pathComp/api/v1/compRoute", 26) != 0)
-		{
-			DEBUG_PC("Unknown url: %s", url);
-			RESTapi_stop(client, source, fd);
-			exit(-1);
+		// word is bound to a known / supported REST Method
+		else {
+			gchar url[255];
+			i = get_url(buf, &j, url);
+			url[i] = '\0';
+			// GET - used for checking status of pathComp ... used url /pathComp/api/v1/health
+			if (method == REST_API_METHOD_GET) {
+				if (strncmp((const char*)url, "/health", 7) != 0) {
+					DEBUG_PC("unknown url [%s] for GET method -- Heatlh function", url);
+					RESTapi_stop(client, source, fd);
+					exit(-1);
+				}
+				else {
+					DEBUG_PC("Sending API Response OK to health requests");
+					rapi_response_ok(source, HTTP_RETURN_CODE_OK, NULL);
+					return TRUE;
+				}
+			}
+			// for method POST, PUT check that the url is "/pathComp"
+			if (method == REST_API_METHOD_POST) {
+				if (strncmp((const char*)url, "/pathComp/api/v1/compRoute", 26) != 0) {
+					DEBUG_PC("Unknown url: %s", url);
+					RESTapi_stop(client, source, fd);
+					exit(-1);
+				}
+			}
+			// get the version	
+			i = get_version(buf, &j, version);
+			version[i] = '\0';
+			break;
 		}
 	}
-	
-	// get the version	
-	i = get_version (buf, &j, version);
-	version[i] = '\0';		
-
 	// Assume HTTP/1.1, then there is Host Header
 	memset(buf, '\0', sizeof(buf));        
 	nbytes = RESTapi_get_line(source, buf, sizeof (buf));
-	if (nbytes == -1)
-	{
+	if (nbytes == -1) {
 		DEBUG_PC ("nbytes -1 ... then close the fd and eliminate associated client");			
 		RESTapi_stop(client, source, fd);
 		return FALSE;					
-	}
-
-	//DEBUG_PC ("Header: %s", buf);	
+	}	
 	
 	// Headers --- The Header Fields ends up with a void line (i.e., \r\n)
-	while ((nbytes > 0) && (strcmp ("\r\n", (const char *)buf) != 0))
-	{	
+	while ((nbytes > 0) && (strcmp ("\r\n", (const char *)buf) != 0)) {	
 		/* read & discard headers */
 		memset(buf, '\0', sizeof(buf));  
 		nbytes = RESTapi_get_line (source, buf, sizeof (buf));
-		if (nbytes == -1)
-		{
+		if (nbytes == -1) {
 			DEBUG_PC ("nbytes -1 ... then close the fd and eliminate associated client");	
 			RESTapi_stop(client, source, fd);
 			return FALSE;
 		}
 		//DEBUG_PC ("Header: %s", buf);	  
-		if (strncmp ((const char *)buf, "Content-Length:", 15) == 0)
-		{
+		if (strncmp ((const char *)buf, "Content-Length:", 15) == 0) {
 			//DEBUG_PC ("Header Content-Length Found");
 			gchar str[20];
 	  
 			gint i = 15, k = 0;  // "Content-Length:" We skip the first 16 characters to directly retrieve the length in bytes of the Body of Request
 			gchar contentLength[255];
 			memset (contentLength, '\0', sizeof (contentLength));			
-			while (buf[i] != '\r')
-			{
+			while (buf[i] != '\r') {
 				//DEBUG_PC ("%c", buf[i]);
 				str[k] = buf[i];
 				k++, i++;
 			}
 			str[k] = '\0';			
 			j = 0, i = 0;
-			while (ISspace(str[j]) && (j < strlen(str)))
-			{
+			while (ISspace(str[j]) && (j < strlen(str))) {
 				j++;
 			}
-			while (j < strlen(str))
-			{
+			while (j < strlen(str)) {
 				contentLength[i] = str[j];
 				i++; j++;
 			}
@@ -1761,8 +1909,7 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
 		}	  
 	}
 	//DEBUG_PC("Read Entire HTTP Header");
-	if (body_length == 0)
-	{
+	if (body_length == 0) {
 		DEBUG_PC ("--- NO REST API Body length (length = %d) ---", body_length);
 		return TRUE;
 	}       
@@ -1771,23 +1918,23 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
 	/////////////////////////////////////////////////////////////////////////////////////////////////////////////
 	//DEBUG_PC ("REST API Request - Body -");
 	nbytes = read_channel (source, (guchar *)(client->ibuf->data + client->ibuf->putp), body_length);
-	if ((nbytes < 0) && (body_length > 0))
-	{
+	if ((nbytes < 0) && (body_length > 0)) {
 		DEBUG_PC ("nbytes: %d; body_length: %d", nbytes, body_length);
 		exit (-1);
-	}
-	
+	}	
 	client->ibuf->putp += nbytes;
 	client->ibuf->endp += nbytes;		
 	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
 	// Parsing the contents of the Request
 	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
-	// build the device list
-	deviceList = create_device_list();
+	// build the device list	
+	deviceList = NULL;
 	// build the link list
-	linkList = create_link_list();
+	linkList = NULL;
 	// Create the network connectivity service list
-	serviceList = create_service_list();
+	serviceList = NULL;
+	// Create the active service List
+	activeServList = NULL;
 	
 	// Process the json contents and store relevant information at Device, Link,
 	// and network connectivity service
@@ -1806,22 +1953,21 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
 	gint httpCode = triggering_routeComp (compRouteOutputList, algId);	
 
 	// Send the response to the REST  API Client
-	if (httpCode != HTTP_RETURN_CODE_OK)
-	{            
+	if (httpCode != HTTP_RETURN_CODE_OK) {            
 		DEBUG_PC ("HTTP CODE: %d -- NO OK", httpCode);
 		rapi_response (source, httpCode);
 	}
-	else
-	{
+	else {
 		DEBUG_PC ("HTTP CODE: %d -- OK", httpCode);
 		rapi_response_ok (source, httpCode, compRouteOutputList);            
 	}
 	
 	// Release the variables		
-	g_free (compRouteOutputList);
-	g_free(linkList);
-	g_free(deviceList);
-	g_free(serviceList);
+	g_free (compRouteOutputList);	
+	g_list_free_full(g_steal_pointer(&linkList), (GDestroyNotify)destroy_link);
+	g_list_free_full(g_steal_pointer(&deviceList), (GDestroyNotify)destroy_device);
+	g_list_free_full(g_steal_pointer(&serviceList), (GDestroyNotify)destroy_requested_service);
+	g_list_free_full(g_steal_pointer(&activeServList), (GDestroyNotify)destroy_active_service);
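+	// g_steal_pointer() returns the current list head and resets the global to
+	// NULL, so the next request starts again from empty lists.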
 	return TRUE;  
 }
 
@@ -1838,23 +1984,20 @@ gboolean RESTapi_activity(GIOChannel *source, GIOCondition cond, gpointer data)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gboolean RESTapi_tcp_new_connection(GIOChannel *source, GIOCondition cond, gpointer data)
-{
+gboolean RESTapi_tcp_new_connection(GIOChannel *source, GIOCondition cond, gpointer data) {
 	DEBUG_PC (" ****** New TCP Connection (REST API) ******");
 	/** get size of client_addre structure */
 	struct sockaddr_in client_addr;
 	socklen_t client = sizeof(client_addr);
 	
-	if ((cond == G_IO_HUP) || (cond == G_IO_ERR) || (G_IO_NVAL))
-	{
+	if ((cond == G_IO_HUP) || (cond == G_IO_ERR) || (cond == G_IO_NVAL)) {
 		//DEBUG_PC ("Something happening with the channel and fd ... cond: %d", cond);		
 		// Find the associated client (by the fd) and remove from PATH COMP client list. 
 		// Stop all the operations over that PATH COMP client bound channel
 		struct pathComp_client *pathComp_client = NULL;
 		gint fd = g_io_channel_unix_get_fd (source);
 		GList *found = g_list_find_custom (RESTapi_tcp_client_list, &fd, find_rl_client_by_fd);
-		if (found != NULL)
-		{
+		if (found != NULL) {
 			pathComp_client = (struct pathComp_client*)(found->data);
 			// remove client
 			RESTapi_client_close(pathComp_client);
@@ -1864,28 +2007,22 @@ gboolean RESTapi_tcp_new_connection(GIOChannel *source, GIOCondition cond, gpoin
 			return FALSE;
 		}		
 	}
-	if (cond == G_IO_IN)
-	{
+	if (cond == G_IO_IN) {
 		gint new = accept(g_io_channel_unix_get_fd(source), (struct sockaddr*)&client_addr, &client);
-		if (new < 0)
-		{
+		if (new < 0) {
 			//DEBUG_PC ("Unable to accept new connection");
 			return FALSE;
 		}
 
-		/** new channel */
+		// new channel
 		GIOChannel * new_channel = g_io_channel_unix_new (new);		
 		//DEBUG_PC ("TCP Connection (REST API) is UP; (socket: %d)", new);
-
-		/** create pathComp client */		
+		// create pathComp client		
 		struct pathComp_client *new_client = RESTapi_client_create (new_channel, new);
 		
-		/** 
-		* force binary encoding with NULL
-		*/
+		// force binary encoding with NULL
 		GError *error = NULL;
-		if ( g_io_channel_set_encoding (new_channel, NULL, &error) != G_IO_STATUS_NORMAL)
-		{		
+		if ( g_io_channel_set_encoding (new_channel, NULL, &error) != G_IO_STATUS_NORMAL) {		
 			DEBUG_PC ("Error: %s", error->message);
 			exit (-1);
 		}
@@ -1893,8 +2030,7 @@ gboolean RESTapi_tcp_new_connection(GIOChannel *source, GIOCondition cond, gpoin
 		// On unbuffered channels, it is safe to mix read
 		// & write calls from the new and old APIs.
 		g_io_channel_set_buffered (new_channel, FALSE);
-		if (g_io_channel_set_flags (new_channel, G_IO_FLAG_NONBLOCK, &error) != G_IO_STATUS_NORMAL )
-		{
+		if (g_io_channel_set_flags (new_channel, G_IO_FLAG_NONBLOCK, &error) != G_IO_STATUS_NORMAL ) {
 			DEBUG_PC ("Error: %s", error->message);
 			exit (-1);
 		}
diff --git a/src/pathcomp/backend/pathComp_RESTapi.h b/src/pathcomp/backend/pathComp_RESTapi.h
index 3b662955959fd8ddad27e337338440b6834f9741..997adce3ead70a314c5d49a6ebeda74ea65ee6a2 100644
--- a/src/pathcomp/backend/pathComp_RESTapi.h
+++ b/src/pathcomp/backend/pathComp_RESTapi.h
@@ -48,8 +48,7 @@
 // List of tcp clients connected to PATH COMP
 
 #define PATH_COMP_CLIENT_TYPE	1000
-struct pathComp_client
-{
+struct pathComp_client {
 	/** IO Channel from client. */
 	GIOChannel *channel;
 
diff --git a/src/pathcomp/backend/pathComp_ear.c b/src/pathcomp/backend/pathComp_ear.c
new file mode 100644
index 0000000000000000000000000000000000000000..aee3d09f768619f3f6eb40231133fedd30dbb769
--- /dev/null
+++ b/src/pathcomp/backend/pathComp_ear.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h> 
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <string.h>
+#include <unistd.h>
+#include <netdb.h>
+#include <glib.h>
+#include <sys/time.h>
+#include <ctype.h>
+#include <strings.h>
+#include <time.h>
+#include <math.h>
+#include <fcntl.h>
+
+#include "pathComp_log.h"
+#include "pathComp_tools.h"
+#include "pathComp_ear.h"
+
+// Global Variables
+GList* contextSet;
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_ear.c
+ * 	@brief Iterates over the list of network connectivity service requests
+ * to compute their paths, fulfilling the constraints and minimizing the
+ * total consumed energy (power)
+ *
+ *  @param outputList
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+void ear_comp_services(struct compRouteOutputList_t* oPathList, gint activeFlag) {
+	g_assert(oPathList);
+	// Check at least there is a service to be processed 
+	if (g_list_length(serviceList) == 0) {
+		DEBUG_PC("serviceList is Empty...");
+		return;
+	}	
+	gint i = 0;
+	DEBUG_PC("[EAR]----- Starting the Energy Aware Routing Computation ------");
+	DEBUG_PC("[EAR]----- Over Context %s Devices and Links", activeFlag ? "Active" : "All");
+	for (GList* listnode = g_list_first(serviceList);
+		listnode;
+		listnode = g_list_next(listnode), i++) {
+		struct service_t* service = (struct service_t*)(listnode->data);
+
+		DEBUG_PC("[EAR] Triggering Computation ServiceId: %s [ContextId: %s]", service->serviceId.service_uuid, service->serviceId.contextId);
+		struct compRouteOutput_t* pathService = &(oPathList->compRouteConnection[i]);
+		DEBUG_PC("Number of pathService[%d]->paths: %d", i, pathService->numPaths);
+		// check endpoints of the service are different (PE devices/nodes are different)
+		if (same_src_dst_pe_nodeid(service) == 0) {
+			DEBUG_PC("[EAR] PEs are the same... no path computation");
+			comp_route_connection_issue_handler(pathService, service);
+			oPathList->numCompRouteConnList++;
+			continue;
+		}
+		struct graph_t* g = get_graph_by_contextId(contextSet, service->serviceId.contextId);
+		if (g == NULL) {
+			DEBUG_PC("[EAR] contextId: %s NOT in the ContextSet ... then NO graph", service->serviceId.contextId);
+			comp_route_connection_issue_handler(pathService, service);
+			oPathList->numCompRouteConnList++;
+			continue;
+		}
+		alg_comp(service, pathService, g, ENERGY_EFFICIENT_ARGUMENT);
+		oPathList->numCompRouteConnList++;
+
+		// for each network connectivity service, a single computed path (out of the KCSP) is returned
+		// If path is found, then the selected resources must be pre-assigned into the context information
+		if (pathService->noPathIssue == NO_PATH_CONS_ISSUE) {
+			continue;
+		}
+		struct path_t* path = &(pathService->paths[pathService->numPaths - 1]);
+		allocate_graph_resources(path, service, g);
+		allocate_graph_reverse_resources(path, service, g);
+		print_graph(g);
+	}
+	return;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_ear.c
+ * 	@brief Tries to route all the services over the active devices and links. If not all
+ * of these services can be routed, they are re-routed over the whole context,
+ * including both active and sleeping/powered-off devices and links
+ *
+ *  @param oList
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ ////////////////////////////////////////////////////////////////////////////////////////
+void ear_comp(struct compRouteOutputList_t* oList) {
+	g_assert(oList);
+
+	DEBUG_PC("Number of services to be processed: %d", g_list_length(serviceList));
+	// Make a working copy of oList for the computation over the active devices and links
+	struct compRouteOutputList_t* oListTmp = create_route_list();
+	duplicate_route_list(oListTmp, oList);
+	print_path_connection_list(oListTmp);
+	
+	// 1st - try to accommodate all the requested services over the active devices and links
+	gint activeContext = 1;
+	// Create the context for the active devices and links
+	DEBUG_PC("=========================== Building the Active ContextSet =================================");
+	contextSet = NULL;
+	build_contextSet_active(&contextSet);
+	//print_contextSet(contextSet);
+	ear_comp_services(oListTmp, activeContext);
+	
+	gint numSuccessPaths = 0;
+	// Check the number of successfully computed paths, i.e., without path issues
+	for (gint i = 0; i < oListTmp->numCompRouteConnList; i++) {
+		struct compRouteOutput_t* ro = &(oListTmp->compRouteConnection[i]);
+		DEBUG_PC("Number of paths: %d for oListTmp[%d]", ro->numPaths, i);
+		if (ro->noPathIssue == 0) {
+			numSuccessPaths++;
+		}
+	}
+	if (numSuccessPaths == oListTmp->numCompRouteConnList) {
+		duplicate_route_list(oList, oListTmp);
+		g_free(oListTmp);
+		return;
+	}
+	// Not all services were accommodated; release the temporary list before the retry
+	g_free(oListTmp);
+	// 2nd - If not all the services have been accommodated, use the whole set of devices and links
+	// Create the context for all the devices and links
+
+	// Remove the previous Context subject to active devices and links
+	g_list_free_full(g_steal_pointer(&contextSet), (GDestroyNotify)destroy_context);
+	contextSet = NULL;
+	DEBUG_PC("====================== Building the whole ContextSet =====================================");
+	build_contextSet(&contextSet);
+	//print_contextSet(contextSet);
+
+	activeContext = 0; // Active flag is not SET
+	ear_comp_services(oList, activeContext);	
+	return;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_ear.c
+ * 	@brief handles the path computation for energy aware routing
+ *
+ *  @param compRouteOutput
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+gint pathComp_ear_alg(struct compRouteOutputList_t* routeConnList) {
+	g_assert(routeConnList);
+	print_path_connection_list(routeConnList);
+
+	gint numSuccesPathComp = 0, numPathCompIntents = 0;
+
+	DEBUG_PC("================================================================");
+	DEBUG_PC("===========================   EAR   =========================");
+	DEBUG_PC("================================================================");
+	// increase the number of Path Comp. Intents
+	numPathCompIntents++;
+	gint http_code = HTTP_CODE_OK;
+
+	// timestamp t0
+	struct timeval t0;
+	gettimeofday(&t0, NULL);
+
+	// Initialize and create the contextSet
+	//contextSet = NULL;	
+	//build_contextSet(contextSet);
+	//print_contextSet(contextSet);
+#if 1	
+	//Triggering the path computation for each specific network connectivity service
+	ear_comp(routeConnList);
+
+	// -- timestamp t1
+	struct timeval t1, delta;
+	gettimeofday(&t1, NULL);
+	delta.tv_sec = t1.tv_sec - t0.tv_sec;
+	delta.tv_usec = t1.tv_usec - t0.tv_usec;
+	delta = tv_adjust(delta);
+
+	numSuccesPathComp++;
+	update_stats_path_comp(routeConnList, delta, numSuccesPathComp, numPathCompIntents);
+	print_path_connection_list(routeConnList);
+#endif
+	g_list_free_full(g_steal_pointer(&contextSet), (GDestroyNotify)destroy_context);
+	return http_code;
+}
\ No newline at end of file
diff --git a/src/pathcomp/backend/pathComp_ear.h b/src/pathcomp/backend/pathComp_ear.h
new file mode 100644
index 0000000000000000000000000000000000000000..dff6202568572bfa3343c21c29ad663e167ccfaa
--- /dev/null
+++ b/src/pathcomp/backend/pathComp_ear.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _PATHCOMP_EAR_H
+#define _PATHCOMP_EAR_H
+
+#include <glib.h>
+#include <glib/gstdio.h>
+#include <glib-2.0/glib/gtypes.h>
+
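+// Forward declaration so the prototype below is self-contained (the full
+// definition is assumed to come from pathComp_tools.h).
+struct compRouteOutputList_t;
+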
+ // Prototype of external declaration of functions
+gint pathComp_ear_alg(struct compRouteOutputList_t*);
+
+#endif
\ No newline at end of file
diff --git a/src/pathcomp/backend/pathComp_ksp.c b/src/pathcomp/backend/pathComp_ksp.c
index 4ea413d5eabbccbe1f86a3bc94edca822ffc4e8d..00ebaf5b8b7e0a888720a4092a0d23d75a3eb04b 100644
--- a/src/pathcomp/backend/pathComp_ksp.c
+++ b/src/pathcomp/backend/pathComp_ksp.c
@@ -36,401 +36,7 @@
 #include "pathComp_ksp.h"
 
 // Global Variables
-struct map_nodes_t *mapNodes;
-struct graph_t *graph;
-struct contextSet_t* contextSet;
-
-///////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_ksp.c
- * 	@brief Dijkstra algorithm
- *
- *  @param srcMapIndex
- *  @param dstMapIndex
- *	@param g
- *	@param s
- *	@param SN
- *	@param RP
- *
- *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-void sp_comp(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct service_t* s,
-	struct nodes_t* SN, struct compRouteOutputItem_t* RP) {
-	g_assert(s);
-	g_assert(g);
-
-	// Set params into mapNodes related to the source nodes of the request
-	mapNodes->map[srcMapIndex].distance = 0.0;
-	mapNodes->map[srcMapIndex].latency = 0.0;
-	mapNodes->map[srcMapIndex].avaiBandwidth = 0.0;
-
-	// Initialize the set Q and S
-	GList* S = NULL, * Q = NULL;
-	gint indexVertice = -1;
-
-	//  Add the source into the Q
-	struct nodeItem_t* nodeItem = g_malloc0(sizeof(struct nodeItem_t));
-	if (nodeItem == NULL) {
-		DEBUG_PC("memory allocation failed\n");
-		exit(-1);
-	}
-	// initialize some nodeItem attributes
-	nodeItem->distance = 0.0;
-	nodeItem->latency = 0.0;
-	duplicate_node_id(&mapNodes->map[srcMapIndex].verticeId, &nodeItem->node);
-	Q = g_list_insert_sorted(Q, nodeItem, sort_by_distance);
-
-	// Check whether there is spurNode (SN) and rootPath (RP)
-	if (SN != NULL && RP != NULL) {
-		struct routeElement_t* re;
-		for (gint j = 0; j < RP->numRouteElements; j++)
-		{
-			// Get the source and target Nodes of the routeElement within the rootPath
-			re = &RP->routeElement[j];
-			DEBUG_PC ("root Link: aNodeId: %s (%s) --> zNodeiId: %s (%s)", re->aNodeId.nodeId, re->aEndPointId, re->zNodeId.nodeId, re->zEndPointId);
-
-			// if ingress of the root link (aNodeId) is the spurNode, then stops
-			if (compare_node_id(&re->aNodeId, SN) == 0)
-			{
-				DEBUG_PC ("root Link: aNodeId: %s and spurNode: %s -- stop exploring the rootPath (RP)", re->aNodeId.nodeId, SN->nodeId);
-				break;
-			}
-			// Extract from Q
-			GList* listnode = g_list_first(Q);
-			struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
-			Q = g_list_remove(Q, node);
-
-			//DEBUG_RL_RA ("Exploring node %s", node->node.nodeId);
-			indexVertice = graph_vertice_lookup(node->node.nodeId, g);
-			g_assert(indexVertice >= 0);
-
-			// Get the indexTargetedVertice
-			gint indexTVertice = -1;
-			indexTVertice = graph_targeted_vertice_lookup(indexVertice, re->zNodeId.nodeId, g);
-			gint done = check_link(node, indexVertice, indexTVertice, g, s, &S, &Q, mapNodes);
-			(void)done;
-
-			// Add to the S list
-			S = g_list_append(S, node);    
-		}
-
-		// Check that the first node in Q set is SpurNode, otherwise something went wrong ...
-		if (compare_node_id(&re->aNodeId, SN) != 0) {
-			//DEBUG_PC ("root Link: aNodeId: %s is NOT the spurNode: %s -- something wrong", re->aNodeId.nodeId, SN->nodeId);
-			g_list_free_full(S, g_free);
-			g_list_free_full(Q, g_free);
-			return;
-		}
-	}		
-	while (g_list_length(Q) > 0) {
-		//Extract from Q set
-		GList* listnode = g_list_first(Q);
-		struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
-		Q = g_list_remove(Q, node);
-		DEBUG_PC ("Q length: %d", g_list_length (Q)); 
-		DEBUG_PC ("DeviceId: %s", node->node.nodeId);            
-
-		// visit all the links from u within the graph
-		indexVertice = graph_vertice_lookup(node->node.nodeId, g);
-		g_assert(indexVertice >= 0);
-
-		// Check the targeted vertices from u
-		for (gint i = 0; i < g->vertices[indexVertice].numTargetedVertices; i++)  {                
-			gint done = check_link(node, indexVertice, i, g, s, &S, &Q, mapNodes);
-			(void)done;
-		}
-		// Add node into the S Set
-		S = g_list_append(S, node);
-		//DEBUG_PC ("S length: %d", g_list_length (S));              
-	}
-	g_list_free_full(S, g_free);
-	g_list_free_full(Q, g_free);
-	return;
-}
-
-///////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_ksp.c
- * 	@brief KSP computation using Dijkstra algorithm
- *
- *  @param pred
- *  @param g
- *	@param s
-  *	@param SN
- *	@param RP
- *
- *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-gint ksp_comp(struct pred_t* pred, struct graph_t* g, struct service_t* s, 
-				struct nodes_t *SN, struct compRouteOutputItem_t *RP) {
-	g_assert(pred);
-	g_assert(g);
-	g_assert(s);
-
-	// Check the both ingress src and dst endpoints are in the graph
-	gint srcMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[0].device_uuid, mapNodes);
-	if (srcMapIndex == -1) {
-		DEBUG_PC("ingress DeviceId: %s NOT in the graph", s->service_endpoints_id[0].device_uuid);
-		return -1;
-	}
-
-	gint dstMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
-	if (dstMapIndex == -1) {
-		DEBUG_PC("egress DeviceId: %s NOT in the graph", s->service_endpoints_id[1].device_uuid);
-		return -1;
-	}
-
-	// Compute the shortest path route
-	sp_comp(srcMapIndex, dstMapIndex, g, s, SN, RP);
-		
-	// Check that a feasible solution in term of latency and bandwidth is found
-	gint map_dstIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
-	struct map_t* dest_map = &mapNodes->map[map_dstIndex];
-	if (!(dest_map->distance < INFINITY_COST)) 	{
-		DEBUG_PC("destination: %s NOT reachable", s->service_endpoints_id[1].device_uuid);
-		return -1;
-	}
-
-	DEBUG_PC("AvailBw @ %s is %f", dest_map->verticeId.nodeId, dest_map->avaiBandwidth);
-	// Check that the computed available bandwidth is larger than 0.0
-	if (dest_map->avaiBandwidth <= (gfloat)0.0) {
-		DEBUG_PC("dst: %s NOT REACHABLE", s->service_endpoints_id[1].device_uuid);
-		return -1;
-	}
-	DEBUG_PC("dst: %s REACHABLE", s->service_endpoints_id[1].device_uuid);
-	// Handle predecessors
-	build_predecessors(pred, s, mapNodes);
-	return 1;
-}
-
-////////////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_ksp.c
- * 	@brief K-CSPF algorithm execution (YEN algorithm)
- *
- *  @param s
- *  @param path
- *  @param g
- *
- *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_t *g) {
-	g_assert(s);
-	g_assert(path);
-	g_assert(g);
-
-	// create map of devices/nodes to handle the path computation using the context
-	mapNodes = create_map_node();
-	build_map_node(mapNodes, g);
-
-	// predecessors to store the computed path    
-	struct pred_t* predecessors = create_predecessors();
-
-	struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[0]);
-	struct service_endpoints_id_t* eEp = &(s->service_endpoints_id[1]);
-
-	// Compute the 1st KSP path
-	gint done = ksp_comp (predecessors, g, s, NULL, NULL);
-	if (done == -1) {
-		DEBUG_PC("NO PATH FOUND %s[%s] ---> %s[%s]", iEp->device_uuid, iEp->endpoint_uuid, eEp->device_uuid, eEp->endpoint_uuid);
-		comp_route_connection_issue_handler(path, s);
-		g_free(mapNodes); g_free(predecessors);
-		return;
-	}
-
-	// Construct the path from the computed predecessors
-	struct compRouteOutputItem_t* p = create_path_item();
-	//print_predecessors(predecessors);
-	build_path(p, predecessors, s);
-	//DEBUG_PC ("Path is constructed");
-
-	gint indexDest = get_map_index_by_nodeId(eEp->device_uuid, mapNodes);
-	struct map_t* dst_map = &mapNodes->map[indexDest];
-	// Get the delay and cost
-	memcpy(&p->cost, &dst_map->distance, sizeof(gdouble));
-	memcpy(&p->availCap, &dst_map->avaiBandwidth, sizeof(dst_map->avaiBandwidth));
-	memcpy(&p->delay, &dst_map->latency, sizeof(mapNodes->map[indexDest].latency));
-	DEBUG_PC ("Computed Path Avail Bw: %f, Path Cost: %f, latency: %f", p->availCap, p->cost, p->delay);
-	print_path(p);
-
-	// If 1st SP satisfies the requirements from the req, STOP
-	gboolean feasibleRoute = check_computed_path_feasability(s, p);
-	if (feasibleRoute == TRUE) 	{
-		DEBUG_PC("1st K-CSPF FEASIBLE, STOP!");
-		print_path (p);		
-		path->numPaths++;
-
-		// Copy the serviceId
-		DEBUG_PC("contextId: %s", s->serviceId.contextId);
-		copy_service_id(&path->serviceId, &s->serviceId);
-
-		// copy the service endpoints, in general, there will be 2 (point-to-point network connectivity services)
-		for (gint i = 0; i < s->num_service_endpoints_id; i++) {
-			struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[i]);
-			struct service_endpoints_id_t* oEp = &(path->service_endpoints_id[i]);
-			copy_service_endpoint_id(oEp, iEp);
-		}
-		path->num_service_endpoints_id = s->num_service_endpoints_id;
-
-		// Copy the computed path
-		struct path_t* targetedPath = &(path->paths[path->numPaths - 1]);
-		duplicate_path_t(p, targetedPath);		
-		print_path_t (targetedPath);
-		g_free(predecessors);
-		g_free(p);
-		g_free(mapNodes);
-		return;
-	}
-
-	DEBUG_PC("1st CSPF COMPUTATION IS NOT FEASIBLE --> TRIGGER K COMPUTATIONS");
-	// Create A and B sets of paths to handle the YEN algorithm
-	struct path_set_t* A = create_path_set();
-	struct path_set_t* B = create_path_set();
-
-	// Add the previously computed path into A->paths[0]	
-	duplicate_path(p, &A->paths[0]);
-
-	A->numPaths++;
-	g_free(predecessors);
-	g_free(p);
-	for (gint k = 1; k < MAX_KSP_VALUE; k++) {
-		DEBUG_PC("------------ kth (%d) ---------------------", k);
-		struct compRouteOutputItem_t* p = create_path_item();
-		duplicate_path(&A->paths[k - 1], p);
-		// The spurNode ranges from near-end node of the first link to the near-end of the last link forming the kth path
-		gint i = 0;
-		struct compRouteOutputItem_t* rootPath = create_path_item();
-		for (i = 0; i < p->numRouteElements; i++) {
-			struct nodes_t* spurNode = create_node();
-			struct nodes_t* nextSpurNode = create_node();
-			struct routeElement_t* re = &(p->routeElement[i]);
-			// Create predecessors to store the computed path
-			struct pred_t* predecessors = create_predecessors();
-			// Clear previous mapNodes, i.e. create it again
-			g_free(mapNodes);
-			mapNodes = create_map_node();
-			build_map_node(mapNodes, g);
-			struct nodes_t* n = &re->aNodeId;
-			duplicate_node_id(n, spurNode);
-			n = &re->zNodeId;
-			duplicate_node_id(n, nextSpurNode);
-			DEBUG_PC("spurNode: %s --> nextSpurNode: %s", spurNode->nodeId, nextSpurNode->nodeId);
-
-			// rootPath contains a set of links of A[k-1] from the source Node till the SpurNode -> NextSpurNode
-			// Example: A[k-1] = {L1, L2, L3, L4}, i.e. " Node_a -- L1 --> Node_b -- L2 --> Node_c -- L3 --> Node_d -- L4 --> Node_e "
-			// E.g., for the ith iteration if the spurNode = Node_c and NextSpurNode = Node_d; then rootPath = {L1, L2, L3}			
-			add_routeElement_path_back(re, rootPath);
-			DEBUG_PC("rootPath:");
-			print_path(rootPath);
-
-			// For all existing and computed paths p in A check if from the source to the NextSpurNode
-			// the set of links matches with those contained in the rootPath
-			// If YES, remove from the auxiliary graph the next link in p from NextSpurNode
-			// Otherwise do nothing 
-			struct graph_t* gAux = create_graph();
-			// Baseline graph 
-			//build_graph (gAux);
-			duplicate_graph(g, gAux);
-			// Modified graph
-			modify_targeted_graph(gAux, A, rootPath, spurNode);
-
-			// Trigger the computation of the path from src to dst constrained to traverse all the links from src 
-			// to spurNode contained into rootPath over the resulting graph			
-			if (ksp_comp(predecessors, gAux, s, spurNode, rootPath) == -1) {
-				DEBUG_PC("FAILED SP from %s via spurNode: %s to %s", iEp->device_uuid, spurNode->nodeId, eEp->device_uuid);
-				g_free(nextSpurNode);
-				g_free(spurNode);
-				g_free(gAux);
-				g_free(predecessors);
-				continue;
-			}
-			DEBUG_PC("SUCCESFUL SP from %s via spurNode: %s to %s", iEp->device_uuid, spurNode->nodeId, eEp->device_uuid);
-			// Create the node list from the predecessors
-			struct compRouteOutputItem_t* newKpath = create_path_item();
-			build_path(newKpath, predecessors, s);
-			DEBUG_PC("new K (for k: %d) Path is built", k);
-			gint indexDest = get_map_index_by_nodeId(eEp->device_uuid, mapNodes);
-			struct map_t* dst_map = &mapNodes->map[indexDest];
-
-			memcpy(&newKpath->cost, &dst_map->distance, sizeof(gdouble));
-			memcpy(&newKpath->availCap, &dst_map->avaiBandwidth, sizeof(dst_map->avaiBandwidth));
-			memcpy(&newKpath->delay, &dst_map->latency, sizeof(mapNodes->map[indexDest].latency));			
-			DEBUG_PC("New PATH (@ kth: %d) ADDED to B[%d] - {Path Cost: %f, e2e latency: %f, bw: %f ", k, B->numPaths, newKpath->cost, newKpath->delay, newKpath->availCap);
-			// Add the computed kth SP to the heap B
-			duplicate_path(newKpath, &B->paths[B->numPaths]);
-			B->numPaths++;
-			DEBUG_PC("Number of B paths: %d", B->numPaths);
-
-			g_free(newKpath);
-			g_free(nextSpurNode);
-			g_free(spurNode);
-			g_free(gAux);
-			g_free(predecessors);
-		}
-
-		// If B is empty then stops
-		if (B->numPaths == 0) {
-			DEBUG_PC("B does not have any path ... the stops kth computation");
-			break;
-		}
-
-		// Sort the potential paths contained in B by cost and latency and available bandwidth
-		sort_path_set(B);
-
-		// Add the lowest path into A[k]		
-		DEBUG_PC("-------------------------------------------------------------");
-		DEBUG_PC("To Add SP from B[0] to A[%d] --- Path Cost: %f, e2e Latency: %f", A->numPaths, B->paths[0].cost, B->paths[0].delay);
-		duplicate_path(&B->paths[0], &A->paths[A->numPaths]);
-		A->numPaths++;
-		DEBUG_PC("A Set size: %d", A->numPaths);
-		DEBUG_PC("-------------------------------------------------------------");
-
-		// Remove/pòp front element from the path set B (i.e. remove B[0])
-		pop_front_path_set(B);
-		DEBUG_PC("B Set Size: %d", B->numPaths);
-	}
-
-	// Copy the serviceId
-	copy_service_id(&path->serviceId, &s->serviceId);
-	// copy the service endpoints, in general, there will be 2 (point-to-point network connectivity services)
-	for (gint m = 0; m < s->num_service_endpoints_id; m++) {
-		struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[m]);
-		struct service_endpoints_id_t* oEp = &(path->service_endpoints_id[m]);
-		copy_service_endpoint_id(oEp, iEp);
-	}
-
-	for (gint ksp = 1; ksp < A->numPaths; ksp++){
-		if (ksp >= MAX_KSP_VALUE) {
-			DEBUG_PC("Number Requested paths (%d) REACHED - STOP", ksp);
-			break;
-		}
-		gdouble feasibleRoute = check_computed_path_feasability(s, &A->paths[ksp]);
-		if (feasibleRoute == TRUE) {
-			DEBUG_PC("A[k-th%d] available: %f, pathCost: %f; latency: %f", ksp, A->paths[ksp].availCap, A->paths[ksp].cost, A->paths[ksp].delay);
-			struct compRouteOutputItem_t* pathaux = &A->paths[ksp];
-			path->numPaths++;
-			struct path_t* targetedPath = &path->paths[path->numPaths - 1];
-			duplicate_path_t(pathaux, targetedPath);		
-			print_path_t(targetedPath);
-			remove_path_set(A);
-			remove_path_set(B);
-			return;
-		}
-	}
-	remove_path_set(A);
-	remove_path_set(B);
-	// No paths found --> Issue	
-	DEBUG_PC("K-SP failed!!!");
-	comp_route_connection_issue_handler(path, s);
-
-	return;
-}
+GList* contextSet;
 
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
@@ -446,14 +52,20 @@ void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_
  /////////////////////////////////////////////////////////////////////////////////////////
 void ksp_alg_execution_services(struct compRouteOutputList_t* outputList) {
 	g_assert(outputList);
-	g_assert(contextSet);
-	g_assert(serviceList);
-
+	// Check that there is at least one service to be processed
+	if (g_list_length(serviceList) == 0) {
+		DEBUG_PC("serviceList is Empty...");
+		return;
+	}
 	DEBUG_PC("----- Starting the KSP Computation ------");
 
 	// Iterate over the list of requested network connectivity services
-	for (gint i = 0; i < serviceList->numServiceList; i++) {
-		struct service_t* service = &(serviceList->services[i]);
+	gint i = 0;
+	for (GList* listnode = g_list_first(serviceList);
+		listnode;
+		listnode = g_list_next(listnode), i++){
+		//struct service_t* service = &(serviceList->services[i]);
+		struct service_t* service = (struct service_t*)(listnode->data);
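+		// i tracks this service's slot in outputList->compRouteConnection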
 
 		DEBUG_PC("Starting the Computation for ServiceId: %s [ContextId: %s]", service->serviceId.service_uuid, service->serviceId.contextId);
 		struct compRouteOutput_t* pathService = &(outputList->compRouteConnection[i]);
@@ -472,13 +84,12 @@ void ksp_alg_execution_services(struct compRouteOutputList_t* outputList) {
 			outputList->numCompRouteConnList++;
 			continue;
 		}
-		alg_comp(service, pathService, g);
+		alg_comp(service, pathService, g, NO_OPTIMIZATION_ARGUMENT); // the last parameter selects the optimization argument (none here)
 		outputList->numCompRouteConnList++;
 
-		// for each network connectivity service, a single computed path (out of the KCSP) is retuned
+		// for each network connectivity service, a single computed path (out of the KSP) is returned
 		// If path is found, then the selected resources must be pre-assigned into the context information
-		if (pathService->noPathIssue == NO_PATH_CONS_ISSUE)
-		{
+		if (pathService->noPathIssue == NO_PATH_CONS_ISSUE) {
 			continue;
 		}
 		struct path_t* path = &(pathService->paths[pathService->numPaths - 1]);
@@ -517,9 +128,9 @@ gint pathComp_ksp_alg(struct compRouteOutputList_t * routeConnList)
 	gettimeofday(&t0, NULL);	
 	
 	// Allocate memory for the context
-	contextSet = create_contextSet();
+	contextSet = NULL;
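+	// contextSet is now a GList of struct context_t*, populated by build_contextSet()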
 	// Build up the contextSet (>= 1)
-	build_contextSet(contextSet);
+	build_contextSet(&contextSet);
 	print_contextSet(contextSet);	
 #if 1	
 	//Triggering the path computation for each specific network connectivity service
@@ -537,6 +148,6 @@ gint pathComp_ksp_alg(struct compRouteOutputList_t * routeConnList)
 	print_path_connection_list(routeConnList);
 #endif
 
-	g_free(contextSet);
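+	// steal the global pointer (left NULL afterwards) and free each context via destroy_context()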
+	g_list_free_full(g_steal_pointer(&contextSet), (GDestroyNotify)destroy_context);
 	return http_code;
 }
\ No newline at end of file
diff --git a/src/pathcomp/backend/pathComp_sp.c b/src/pathcomp/backend/pathComp_sp.c
index b143b04933f1ac9099af3edf3af087cc58e32c5b..48231b591e66ae0f8161ff14f79e7c9a6d832328 100644
--- a/src/pathcomp/backend/pathComp_sp.c
+++ b/src/pathcomp/backend/pathComp_sp.c
@@ -36,74 +36,7 @@
 #include "pathComp_sp.h"
 
 // Global Variables
-struct map_nodes_t* mapNodes;
-struct graph_t* graph;
-struct contextSet_t* contextSet;
-
-///////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_sp.c
- * 	@brief Excecution Dijkstra algorithm
- *
- *  @param srcMapIndex
- *  @param dstMapIndex
- *	@param g
- *	@param s
- *
- *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct service_t* s) {
-	g_assert(s);
-	g_assert(g);
-
-	// Set params into mapNodes related to the source nodes of the request
-	mapNodes->map[srcMapIndex].distance = 0.0;
-	mapNodes->map[srcMapIndex].latency = 0.0;
-	mapNodes->map[srcMapIndex].avaiBandwidth = 0.0;
-
-	// Initialize the set Q and S
-	GList* S = NULL, *Q = NULL;
-	gint indexVertice = -1;
-
-	//  Add the source into the Q
-	struct nodeItem_t* nodeItem = g_malloc0(sizeof(struct nodeItem_t));
-	if (nodeItem == NULL) {
-		DEBUG_PC("memory allocation failed\n");
-		exit(-1);
-	}
-	// initialize some nodeItem attributes
-	nodeItem->distance = 0.0;
-	nodeItem->latency = 0.0;
-	duplicate_node_id(&mapNodes->map[srcMapIndex].verticeId, &nodeItem->node);
-	Q = g_list_insert_sorted(Q, nodeItem, sort_by_distance);
-
-	while (g_list_length(Q) > 0) {
-		//Extract from Q set
-		GList* listnode = g_list_first(Q);
-		struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
-		Q = g_list_remove(Q, node);
-		DEBUG_PC("Q length: %d", g_list_length(Q));
-		DEBUG_PC("DeviceId: %s", node->node.nodeId);
-
-		// visit all the links from u within the graph
-		indexVertice = graph_vertice_lookup(node->node.nodeId, g);
-		g_assert(indexVertice >= 0);
-
-		// Check the targeted vertices from u
-		for (gint i = 0; i < g->vertices[indexVertice].numTargetedVertices; i++) {
-			gint done = check_link(node, indexVertice, i, g, s, &S, &Q, mapNodes);
-			(void)done;
-		}
-		// Add node into the S Set
-		S = g_list_append(S, node);
-		//DEBUG_PC ("S length: %d", g_list_length (S));              
-	}
-	g_list_free_full(S, g_free);
-	g_list_free_full(Q, g_free);
-	return;
-}
+GList* contextSet;
 
 ///////////////////////////////////////////////////////////////////////////////////
 /**
@@ -113,12 +46,13 @@ void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct serv
  *  @param pred
  *  @param g
  *	@param s
+ *  @param mapNodes
  *
 *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-gint computation(struct pred_t* pred, struct graph_t* g, struct service_t* s) {
+gint computation(struct pred_t* pred, struct graph_t* g, struct service_t* s, struct map_nodes_t* mapNodes) {
 	g_assert(pred);
 	g_assert(g);
 	g_assert(s);
@@ -137,7 +71,7 @@ gint computation(struct pred_t* pred, struct graph_t* g, struct service_t* s) {
 	}
 
 	// Compute the shortest path
-	dijkstra(srcMapIndex, dstMapIndex, g, s);
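+	// shared dijkstra(): explicit mapNodes, NULL spur-node/root-path (no KSP constraints here), zeroed optimization flags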
+	dijkstra(srcMapIndex, dstMapIndex, g, s, mapNodes, NULL, NULL, 0x00000000);
 
 	// Check that a feasible solution in term of latency and bandwidth is found
 	gint map_dstIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
@@ -179,7 +113,7 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
 	g_assert(g);
 
 	// create map of devices / nodes to handle the path computation using the context
-	mapNodes = create_map_node();
+	struct map_nodes_t *mapNodes = create_map_node();
 	build_map_node(mapNodes, g);
 
 	// predecessors to store the computed path    
@@ -189,7 +123,7 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
 	struct service_endpoints_id_t* eEp = &(s->service_endpoints_id[1]);
 
 	// SP computation
-	gint done = computation(predecessors, g, s);
+	gint done = computation(predecessors, g, s, mapNodes);
 	if (done == -1) {
 		DEBUG_PC("NO PATH FOUND %s[%s] ---> %s[%s]", iEp->device_uuid, iEp->endpoint_uuid, eEp->device_uuid, eEp->endpoint_uuid);
 		comp_route_connection_issue_handler(path, s);
@@ -204,11 +138,8 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
 	//DEBUG_PC ("Path is constructed");
 
 	gint indexDest = get_map_index_by_nodeId(eEp->device_uuid, mapNodes);
-	struct map_t* dst_map = &mapNodes->map[indexDest];
-	// Get the delay and cost
-	memcpy(&p->cost, &dst_map->distance, sizeof(gdouble));
-	memcpy(&p->availCap, &dst_map->avaiBandwidth, sizeof(dst_map->avaiBandwidth));
-	memcpy(&p->delay, &dst_map->latency, sizeof(mapNodes->map[indexDest].latency));
+	struct map_t* dst_map = &mapNodes->map[indexDest]; 	
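+	// set_path_attributes() replaces the removed memcpy calls, copying cost/availBw/latency (and presumably the new power attribute) from dst_map into p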
+	set_path_attributes(p, dst_map);
 	DEBUG_PC("Computed Path Avail Bw: %f, Path Cost: %f, latency: %f", p->availCap, p->cost, p->delay);
 	print_path(p);
 
@@ -239,10 +170,8 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
 		g_free(mapNodes);
 		return;
 	}
-
 	DEBUG_PC("SP FAILED!!!");
 	comp_route_connection_issue_handler(path, s);
-
 	return;
 }
 
@@ -257,16 +186,21 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
 *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
-void sp_execution_services(struct compRouteOutputList_t* oPathList)
-{
-	g_assert(oPathList);
-	g_assert(contextSet);
-	g_assert(serviceList);
+void sp_execution_services(struct compRouteOutputList_t* oPathList) {
+	g_assert(oPathList);	
+	// Check that there is at least one service to be processed
+	if (g_list_length(serviceList) == 0) {
+		DEBUG_PC("Requested serviceList is Empty...");
+		return;
+	}
 
 	DEBUG_PC("----- Starting the SP Computation ------");
-
-	for (gint i = 0; i < serviceList->numServiceList; i++) {
-		 struct service_t* service = &(serviceList->services[i]);
+	gint i = 0;
+	for (GList* listnode = g_list_first(serviceList);
+		listnode;
+		listnode = g_list_next(listnode), i++) {
+		//struct service_t* service = &(serviceList->services[i]);
+		struct service_t* service = (struct service_t*)(listnode->data);
 
 		 DEBUG_PC("Starting the Computation for ServiceId: %s [ContextId: %s]", service->serviceId.service_uuid, service->serviceId.contextId);
 		 struct compRouteOutput_t* pathService = &(oPathList->compRouteConnection[i]);
@@ -314,10 +248,8 @@ void sp_execution_services(struct compRouteOutputList_t* oPathList)
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-gint pathComp_sp_alg(struct compRouteOutputList_t* routeConnList)
-{
+gint pathComp_sp_alg(struct compRouteOutputList_t* routeConnList) {
 	g_assert(routeConnList);
-
 	gint numSuccesPathComp = 0, numPathCompIntents = 0;
 
 	DEBUG_PC("================================================================");
@@ -332,9 +264,9 @@ gint pathComp_sp_alg(struct compRouteOutputList_t* routeConnList)
 	gettimeofday(&t0, NULL);
 
 	// Allocate memory for the context
-	contextSet = create_contextSet();
+	contextSet = NULL;
 	// Build up the contextSet (>= 1)
-	build_contextSet(contextSet);
+	build_contextSet(&contextSet);
 	print_contextSet(contextSet);
 #if 1	
 	//Triggering the path computation for each specific network connectivity service
@@ -352,7 +284,7 @@ gint pathComp_sp_alg(struct compRouteOutputList_t* routeConnList)
 	print_path_connection_list(routeConnList);
 #endif
 
-	g_free(contextSet);
+	g_list_free_full(g_steal_pointer(&contextSet), (GDestroyNotify)destroy_context);
 	return http_code;
 }
 
diff --git a/src/pathcomp/backend/pathComp_tools.c b/src/pathcomp/backend/pathComp_tools.c
index 5f1748b1a58a0d1b935c064ef4b92ac8ee0da389..e7b91ee9e5a8a0a1c28344d17247e307238ed4c7 100644
--- a/src/pathcomp/backend/pathComp_tools.c
+++ b/src/pathcomp/backend/pathComp_tools.c
@@ -59,7 +59,6 @@ struct timeval tv_adjust (struct timeval a) {
 		a.tv_usec -= 1000000;
 		a.tv_sec++;
 	}
-
 	while (a.tv_usec < 0) {
 		a.tv_usec += 1000000;
 		a.tv_sec--;
@@ -80,8 +79,7 @@ struct timeval tv_adjust (struct timeval a) {
  */
  ////////////////////////////////////////////////////////////////////////////////////////
 void duplicate_string(gchar* dst, gchar* src) {
-	g_assert(dst);
-	g_assert(src);
+	g_assert(dst); g_assert(src);
 	strcpy(dst, src);
 	dst[strlen(dst)] = '\0';
 	return;
@@ -99,16 +97,15 @@ void duplicate_string(gchar* dst, gchar* src) {
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void print_path (struct compRouteOutputItem_t *p) {
-	g_assert(p);
-	
+	g_assert(p);	
 	DEBUG_PC ("=========== COMPUTED PATH =======================");
-	DEBUG_PC ("Path Avail. Bw: %f, E2E Path Latency: %f, Path Cost: %f", p->availCap, p->delay, p->cost);
+	DEBUG_PC ("E2E Avail. Bw: %f, Latency: %f, Cost: %f, Consumed Power (in W): %f", p->availCap, p->delay, p->cost, p->power);
 	for (gint k = 0; k < p->numRouteElements; k++) {
-		DEBUG_PC ("aNodeId: %s (%s) --> zNodeId: %s (%s)", p->routeElement[k].aNodeId.nodeId, p->routeElement[k].aEndPointId,
+		DEBUG_PC ("%s[%s] --> %s[%s]", p->routeElement[k].aNodeId.nodeId, p->routeElement[k].aEndPointId,
 																p->routeElement[k].zNodeId.nodeId, p->routeElement[k].zEndPointId);
-		DEBUG_PC("linkId: %s", p->routeElement[k].linkId);
-		DEBUG_PC("aTopologyId: %s", p->routeElement[k].aTopologyId);
-		DEBUG_PC("zTopologyId: %s", p->routeElement[k].zTopologyId);
+		DEBUG_PC("\t linkId: %s", p->routeElement[k].linkId);
+		DEBUG_PC("\t aTopologyId: %s", p->routeElement[k].aTopologyId);
+		DEBUG_PC("\t zTopologyId: %s", p->routeElement[k].zTopologyId);
 	}
 	DEBUG_PC ("==================================================================");		
 	return;
@@ -128,8 +125,8 @@ void print_path (struct compRouteOutputItem_t *p) {
 void print_path_t(struct path_t* p) {
 	g_assert(p);
 	DEBUG_PC(" ============ COMPUTED OUTPUT PATH =================");
-	DEBUG_PC("Path Avail Capacity: %f, Cost: %f, Latency: %f", p->path_capacity.value,
-			p->path_cost.cost_value, p->path_latency.fixed_latency);
+	DEBUG_PC("Path AvailBw: %f, Cost: %f, Latency: %f, Power: %f", p->path_capacity.value,
+			p->path_cost.cost_value, p->path_latency.fixed_latency, p->path_power.power);
 	DEBUG_PC("number of links of path %d", p->numPathLinks);
 	for (gint k = 0; k < p->numPathLinks; k++) {
 		DEBUG_PC("Link: %s", p->pathLinks[k].linkId);
@@ -144,6 +141,25 @@ void print_path_t(struct path_t* p) {
 	return;
 }
 
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Function used to allocate memory for struct path_t
+ *
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ ////////////////////////////////////////////////////////////////////////////////////////
+struct path_t* create_path() {
+	struct path_t* p = g_malloc0(sizeof(struct path_t));
+	if (p == NULL) {
+		DEBUG_PC("Memory allocation failure");
+		exit(-1);
+	}
+	return(p);
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
@@ -178,12 +194,9 @@ gchar* get_uuid_char(uuid_t uuid) {
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void copy_service_id(struct serviceId_t* o, struct serviceId_t* i) {
-	g_assert(o);
-	g_assert(i);
-
+	g_assert(o); g_assert(i);
 	memcpy(o->contextId, i->contextId, sizeof(i->contextId));
 	memcpy(o->service_uuid, i->service_uuid, sizeof(i->service_uuid));
-
 	return;
 }
 
@@ -200,8 +213,7 @@ void copy_service_id(struct serviceId_t* o, struct serviceId_t* i) {
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void copy_service_endpoint_id(struct service_endpoints_id_t* oEp, struct service_endpoints_id_t* iEp) {
-	g_assert(oEp);
-	g_assert(iEp);
+	g_assert(oEp); g_assert(iEp);
 
 	// copy topology information
 	memcpy(oEp->topology_id.contextId, iEp->topology_id.contextId, sizeof(iEp->topology_id.contextId));
@@ -216,8 +228,8 @@ void copy_service_endpoint_id(struct service_endpoints_id_t* oEp, struct service
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
- * 	@brief From the set of contexts, it is returned the graph associated to that contexct matching
- * with the passed contextId
+ * 	@brief From the set of contexts, return the graph associated to the context matching
+ *	the passed contextId.
  *
  *	@param Set
  *  @param contextId
@@ -226,15 +238,16 @@ void copy_service_endpoint_id(struct service_endpoints_id_t* oEp, struct service
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct graph_t* get_graph_by_contextId(struct contextSet_t* Set, gchar* contextId) {
-	g_assert(Set);
+struct graph_t* get_graph_by_contextId(GList* set, gchar* contextId) {
 	g_assert(contextId);
 
 	// iterate over the set of context. Pick the one matching with contextId, and return the graph.
 	// If not found, return NULL
 	struct graph_t* g = NULL;
-	for (gint i = 0; i < Set->num_context_set; i++) {
-		struct context_t* context = &(Set->contextList[i]);
+	for (GList *ln = g_list_first(set);
+		ln;
+		ln = g_list_next(ln)){
+		struct context_t* context = (struct context_t*)(ln->data);
 		if (strcmp(context->contextId, contextId) == 0) {
 			g = &(context->g);
 			return g;
@@ -297,16 +310,13 @@ struct path_constraints_t * get_path_constraints(struct service_t* s) {
  * 	@file pathComp_tools.c
  * 	@brief Creates the predecessors to keep the computed path
  *
- * 
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-struct pred_t * create_predecessors ()
-{
+struct pred_t * create_predecessors () {
 	struct pred_t *predecessors = g_malloc0 (sizeof (struct pred_t));
-	if (predecessors == NULL)
-	{
+	if (predecessors == NULL) {
 		DEBUG_PC ("memory allocation failed\n");
 		exit (-1);
 	}   
@@ -323,11 +333,9 @@ struct pred_t * create_predecessors ()
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct edges_t* create_edge()
-{
+struct edges_t* create_edge() {
 	struct edges_t* e = g_malloc0(sizeof(struct edges_t));
-	if (e == NULL)
-	{
+	if (e == NULL) {
 		DEBUG_PC("Memory allocation failed\n");
 		exit(-1);
 	}
@@ -376,16 +384,13 @@ void print_predecessors (struct pred_t *p)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void build_predecessors (struct pred_t *p, struct service_t *s, struct map_nodes_t *map)
-{
-	g_assert (p);
-	g_assert (s);
-	g_assert (map);
+void build_predecessors (struct pred_t *p, struct service_t *s, struct map_nodes_t *map) {
+	g_assert (p); g_assert (s); g_assert (map);
 	
 	struct nodes_t *v = create_node();
 	duplicate_string(v->nodeId, s->service_endpoints_id[1].device_uuid);
 	
-	struct edges_t *e = create_edge ();	
+	struct edges_t *e = create_edge();	
 	get_edge_from_map_by_node (e, v, map);
 			
 	// Get u (being source of edge e)
@@ -416,9 +421,7 @@ void build_predecessors (struct pred_t *p, struct service_t *s, struct map_nodes
 		p->numPredComp++;		
 	}
 	print_predecessors (p);
-    g_free (e);
-	g_free(v);
-	g_free(srcNode);
+    g_free (e); g_free(v); g_free(srcNode);
 	return;
 }
 
@@ -452,11 +455,9 @@ struct nodes_t * create_node ()
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-struct routeElement_t * create_routeElement ()
-{
+struct routeElement_t * create_routeElement () {
 	struct routeElement_t *rE = g_malloc0 (sizeof (struct routeElement_t));
-	if (rE == NULL)
-	{
+	if (rE == NULL)	{
 		DEBUG_PC ("memory allocation problem");
 		exit (-1);		
 	}
@@ -475,11 +476,9 @@ struct routeElement_t * create_routeElement ()
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void duplicate_node_id (struct nodes_t *src, struct nodes_t *dst)
-{	
+void duplicate_node_id (struct nodes_t *src, struct nodes_t *dst) {	
 	g_assert (src);
-	g_assert (dst);
-	
+	g_assert (dst);	
 	//DEBUG_PC ("Duplicate nodeId for %s", src->nodeId);	
 	strcpy (dst->nodeId, src->nodeId);	
 	return;
@@ -497,8 +496,7 @@ void duplicate_node_id (struct nodes_t *src, struct nodes_t *dst)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gint compare_node_id (struct nodes_t *a, struct nodes_t *b)
-{
+gint compare_node_id (struct nodes_t *a, struct nodes_t *b) {
 	g_assert (a);
 	g_assert (b);	
 	return (memcmp (&a->nodeId, b->nodeId, strlen (b->nodeId)));	
@@ -541,8 +539,7 @@ void duplicate_routeElement (struct routeElement_t *src, struct routeElement_t *
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void duplicate_edge (struct edges_t *e1, struct edges_t *e2) {
-	g_assert (e1);
-	g_assert (e2);
+	g_assert (e1); g_assert (e2);
 		
 	duplicate_node_id (&e2->aNodeId, &e1->aNodeId);
 	duplicate_node_id (&e2->zNodeId, &e1->zNodeId);
@@ -560,7 +557,8 @@ void duplicate_edge (struct edges_t *e1, struct edges_t *e2) {
 	memcpy(&e1->availCap, &e2->availCap, sizeof(gdouble));
 
 	memcpy (&e1->cost, &e2->cost, sizeof (gdouble));
-    memcpy (&e1->delay, &e2->delay, sizeof (gdouble));	
+    memcpy (&e1->delay, &e2->delay, sizeof (gdouble));
+	memcpy(&e1->energy, &e2->energy, sizeof(gdouble));
 	return;
 }
 
@@ -577,19 +575,18 @@ void duplicate_edge (struct edges_t *e1, struct edges_t *e2) {
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void duplicate_path (struct compRouteOutputItem_t *a, struct compRouteOutputItem_t *b) {		
-	g_assert (a);
-	g_assert (b);
-	memcpy (&b->availCap, &a->availCap, sizeof (gdouble));
-	b->numRouteElements = a->numRouteElements;	
+	g_assert (a); 	g_assert (b);
+	memcpy(&b->availCap, &a->availCap, sizeof (gdouble));		
 	memcpy(&b->cost, &a->cost, sizeof(gdouble));	
-	memcpy (&b->delay, &a->delay, sizeof (gdouble));
+	memcpy(&b->delay, &a->delay, sizeof (gdouble));
+	memcpy(&b->power, &a->power, sizeof(gdouble));
+	b->numRouteElements = a->numRouteElements;
 	for (gint k = 0; k < a->numRouteElements; k++) {			
 		//DEBUG_PC ("aNodeId: %s // zNodeId: %s", a->routeElement[k].aNodeId.nodeId, a->routeElement[k].zNodeId.nodeId);
 		// aNodeId duplication
 		struct nodes_t *n1 = &(a->routeElement[k].aNodeId);
 		struct nodes_t *n2 = &(b->routeElement[k].aNodeId);			
-		duplicate_node_id (n1, n2);			
-					
+		duplicate_node_id (n1, n2);					
 		//zNodeId duplication
 		n1 = &(a->routeElement[k].zNodeId);
 		n2 = &(b->routeElement[k].zNodeId);			
@@ -615,14 +612,14 @@ void duplicate_path (struct compRouteOutputItem_t *a, struct compRouteOutputItem
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void duplicate_path_t(struct compRouteOutputItem_t* a, struct path_t* b)
-{
-	g_assert(a);
-	g_assert(b);
+void duplicate_path_t(struct compRouteOutputItem_t* a, struct path_t* b) {
+	g_assert(a); g_assert(b);
 
+	// transfer path characteristics ...
 	memcpy(&b->path_capacity.value, &a->availCap, sizeof(gdouble));
 	memcpy(&b->path_cost.cost_value, &a->cost, sizeof(gdouble));
 	memcpy(&b->path_latency.fixed_latency, &a->delay, sizeof(gdouble));
+	memcpy(&b->path_power.power, &a->power, sizeof(gdouble));
 
 	b->numPathLinks = a->numRouteElements;
 
@@ -661,23 +658,17 @@ void duplicate_path_t(struct compRouteOutputItem_t* a, struct path_t* b)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gint get_map_index_by_nodeId (gchar *nodeId, struct map_nodes_t * mapN)
-{
-    gint index = -1;
-    gint i = 0;
-    
-    for (i = 0; i < mapN->numMapNodes; i++)
-    {
+gint get_map_index_by_nodeId (gchar *nodeId, struct map_nodes_t * mapN) {
+    gint i = 0;    
+    for (i = 0; i < mapN->numMapNodes; i++) {
 		//DEBUG_PC ("i: %d; current: %s // targeted: %s", i, mapN->map[i].verticeId.nodeId, nodeId);
-        if (memcmp (mapN->map[i].verticeId.nodeId, nodeId, strlen (nodeId)) == 0)
-        {
-            index = i;
-			//DEBUG_PC ("Index: %d", index);
-            return index;            
+        if (memcmp (mapN->map[i].verticeId.nodeId, nodeId, strlen (nodeId)) == 0) {
+			//DEBUG_PC ("Index: %d", i);
+			return i;            
         }
     }
 	//DEBUG_PC ("Index: %d", index);
-    return index;
+    return -1;
 }
 
 ////////////////////////////////////////////////////////////////////////////////////////
@@ -693,14 +684,11 @@ gint get_map_index_by_nodeId (gchar *nodeId, struct map_nodes_t * mapN)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void get_edge_from_map_by_node (struct edges_t *e, struct nodes_t* v, struct map_nodes_t *mapN) {
-	
+void get_edge_from_map_by_node (struct edges_t *e, struct nodes_t* v, struct map_nodes_t *mapN) {	
 	//DEBUG_PC ("Get the Edge into map from node v: %s", v.nodeId);	
 	// Get the edge reaching the node v from mapNodes
-	gint map_vIndex = get_map_index_by_nodeId (v->nodeId, mapN);
-	
-	//DEBUG_PC ("aNodeId: %s --> zNodeId: %s", mapN->map[map_vIndex].predecessor.aNodeId.nodeId, mapN->map[map_vIndex].predecessor.zNodeId.nodeId);
-	
+	gint map_vIndex = get_map_index_by_nodeId (v->nodeId, mapN);	
+	//DEBUG_PC ("aNodeId: %s --> zNodeId: %s", mapN->map[map_vIndex].predecessor.aNodeId.nodeId, mapN->map[map_vIndex].predecessor.zNodeId.nodeId);	
 	struct edges_t *te = &(mapN->map[map_vIndex].predecessor);	
 	duplicate_edge (e, te);
 	return;
@@ -721,7 +709,6 @@ void get_edge_from_map_by_node (struct edges_t *e, struct nodes_t* v, struct map
 /////////////////////////////////////////////////////////////////////////////////////////
 void get_edge_from_predecessors (struct edges_t *e, struct nodes_t* n, struct pred_t *predecessors) {
 	g_assert(predecessors);
-
 	DEBUG_PC ("Get edge outgoing node %s from predecessors list", n->nodeId);
 	//print_predecessors (predecessors);
 	for (gint i = 0; i < predecessors->numPredComp; i++) {
@@ -751,14 +738,13 @@ void get_edge_from_predecessors (struct edges_t *e, struct nodes_t* n, struct pr
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void build_path (struct compRouteOutputItem_t *p, struct pred_t *predecessors, struct service_t *s) {
-	//DEBUG_PC ("\n");
 	// Get the source device Id	of the network connectivity service
 	struct nodes_t *v = create_node();
+	// Src Node of the Service set to v
 	duplicate_string(v->nodeId, s->service_endpoints_id[0].device_uuid);
-
-	struct edges_t* e = create_edge();
-							  	
+								  	
 	// Get the edge for v in predecessors
+	struct edges_t* e = create_edge();
 	get_edge_from_predecessors (e, v, predecessors);	
 	// Get the target for e
 	struct nodes_t u;	
@@ -778,14 +764,12 @@ void build_path (struct compRouteOutputItem_t *p, struct pred_t *predecessors, s
 	duplicate_string(p->routeElement[k].contextId, s->serviceId.contextId);
 	p->numRouteElements++;
 
-	// Get the destination device Id of the network connectivity service
+	// Get Dst Node of connectivity service
 	struct nodes_t* dst = create_node();
 	duplicate_string(dst->nodeId, s->service_endpoints_id[1].device_uuid);
-	while (compare_node_id (&u, dst) != 0)	
-	{
+	while (compare_node_id (&u, dst) != 0) {
 		k++; 
-		p->numRouteElements++;
-		// v = u		
+		p->numRouteElements++;			
 		duplicate_node_id (&u, v);
 		get_edge_from_predecessors (e, v, predecessors);
 		// Get the target u		
@@ -798,10 +782,7 @@ void build_path (struct compRouteOutputItem_t *p, struct pred_t *predecessors, s
 		duplicate_string(p->routeElement[k].linkId, e->linkId);
 		duplicate_string(p->routeElement[k].aTopologyId, e->aTopologyId);
 		duplicate_string(p->routeElement[k].zTopologyId, e->zTopologyId);
-		duplicate_string(p->routeElement[k].contextId, s->serviceId.contextId);
-
-		// copy the contextId
-		//duplicate_string(p->routeElement[k].contextId, s->service_endpoints_id[0].topology_id.contextId);
+		duplicate_string(p->routeElement[k].contextId, s->serviceId.contextId);		
 	}		
 	g_free(e); g_free(v); g_free(pathCons);
 	//DEBUG_PC ("Path is constructed");	
@@ -819,22 +800,19 @@ void build_path (struct compRouteOutputItem_t *p, struct pred_t *predecessors, s
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void print_graph (struct graph_t *g)
-{	     
+void print_graph (struct graph_t *g) {
+	g_assert(g);
     DEBUG_PC ("================================================================");
     DEBUG_PC ("===========================   GRAPH   ==========================");
     DEBUG_PC ("================================================================");
-
-	DEBUG_PC("Graph Num Vertices: %d", g->numVertices);
+	DEBUG_PC("Graph Num Vertices: %d", g->numVertices);    
     
-    gint i = 0, j = 0, k = 0;
-    for (i = 0; i < g->numVertices; i++)
-    {
+    for (gint i = 0; i < g->numVertices; i++) {
         DEBUG_PC ("Head Vertice [%s]", g->vertices[i].verticeId.nodeId);
-        for (j = 0; j < g->vertices[i].numTargetedVertices; j++)
+        for (gint j = 0; j < g->vertices[i].numTargetedVertices; j++)
         {
             DEBUG_PC ("  Tail Vertice: %s", g->vertices[i].targetedVertices[j].tVertice.nodeId);
-            for (k = 0; k < g->vertices[i].targetedVertices[j].numEdges; k++)
+            for (gint k = 0; k < g->vertices[i].targetedVertices[j].numEdges; k++)
             {
                 struct edges_t *e = &(g->vertices[i].targetedVertices[j].edges[k]);
 				DEBUG_PC ("%s(%s) --> %s(%s) [C: %f, Bw: %f b/s, Delay: %f ms]", e->aNodeId.nodeId, e->aEndPointId, e->zNodeId.nodeId, 
@@ -992,8 +970,7 @@ gint graph_targeted_vertice_add (gint vIndex, gchar *nodeId, struct graph_t *g)
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
-void remove_edge_from_graph (struct graph_t *g, struct edges_t *e)
-{
+void remove_edge_from_graph (struct graph_t *g, struct edges_t *e) {
 	// Find the ingress vertice into the graph
 	DEBUG_PC ("Removing from Graph %s[%s]) ---> %s[%s] (linkId: %s)", e->aNodeId.nodeId, e->aEndPointId, e->zNodeId.nodeId, e->aEndPointId, e->linkId);
 	gint verticeIndex = -1;		
@@ -1009,14 +986,13 @@ void remove_edge_from_graph (struct graph_t *g, struct edges_t *e)
 	if (targetedVerticeIndex == -1)	{
 		DEBUG_PC ("%s --> %s NOT in the Graph!!", e->aNodeId.nodeId, e->zNodeId.nodeId);
 		return;
-	}
-	
+	}	
 	//DEBUG_PC ("%s --> %s found in the Graph", e->aNodeId.nodeId, e->zNodeId.nodeId);
 	
 	// Get the edge position
 	gint edgeIndex = -1;
 	edgeIndex = graph_edge_lookup (verticeIndex, targetedVerticeIndex, e, g);
-	if (edgeIndex == -1) 	{
+	if (edgeIndex == -1) {
 		DEBUG_PC ("%s --> %s NOT in the Graph!!", e->aNodeId.nodeId, e->zNodeId.nodeId);
 		return;
 	}
@@ -1046,11 +1022,9 @@ void remove_edge_from_graph (struct graph_t *g, struct edges_t *e)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-struct path_set_t * create_path_set ()
-{
+struct path_set_t * create_path_set () {
 	struct path_set_t * p = g_malloc0 (sizeof (struct path_set_t));
-	if (p == NULL)
-	{
+	if (p == NULL) {
 		DEBUG_PC ("Memory allocation problem");
 		exit (-1);		
 	}
@@ -1068,10 +1042,8 @@ struct path_set_t * create_path_set ()
  *	@date 2021
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void remove_path_set(struct path_set_t* p)
-{
-	g_assert(p);
-	g_free(p);
+void remove_path_set(struct path_set_t* p) {
+	g_assert(p); g_free(p);
 	return;
 }
 
@@ -1087,15 +1059,14 @@ void remove_path_set(struct path_set_t* p)
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void build_map_node (struct map_nodes_t *mapN, struct graph_t *g)
-{
+void build_map_node (struct map_nodes_t *mapN, struct graph_t *g) {
 	//DEBUG_PC ("Construction of the Map of Nodes");               
-    for (gint i = 0; i < g->numVertices; i++)
-    {	
+    for (gint i = 0; i < g->numVertices; i++) {	
 		duplicate_node_id (&g->vertices[i].verticeId, &mapN->map[i].verticeId);
         mapN->map[i].distance = INFINITY_COST;
         mapN->map[i].avaiBandwidth = 0.0;
         mapN->map[i].latency = INFINITY_COST;
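+		// power is minimized like distance/latency, so it also starts at INFINITY_COST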
+		mapN->map[i].power = INFINITY_COST;
         mapN->numMapNodes++;
     }
     //DEBUG_PC ("mapNodes formed by %d Nodes", mapN->numMapNodes);
@@ -1107,22 +1078,137 @@ void build_map_node (struct map_nodes_t *mapN, struct graph_t *g)
  * 	@file pathComp_tools.c
  * 	@brief Allocate memory for path of struct compRouteOutputList_t *
  *
- *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-struct compRouteOutputList_t * create_route_list ()
-{
+struct compRouteOutputList_t * create_route_list () {
 	struct compRouteOutputList_t *p = g_malloc0 (sizeof (struct compRouteOutputList_t));
-	if (p == NULL)
-	{
+	if (p == NULL) {
 		DEBUG_PC ("Memory Allocation Problem");
 		exit (-1);
 	}
 	return p;
 }
 
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Copy all the attributes defining a path
+ *
+ * @param dst_path
+ * @param src_path
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void copy_path(struct path_t* dst_path, struct path_t* src_path) {
+	g_assert(dst_path);
+	g_assert(src_path);
+
+	// Path capacity
+	dst_path->path_capacity.unit = src_path->path_capacity.unit;
+	memcpy(&dst_path->path_capacity.value, &src_path->path_capacity.value, sizeof(gdouble));
+
+	// Path latency
+	memcpy(&dst_path->path_latency.fixed_latency, &src_path->path_latency.fixed_latency, sizeof(gdouble));
+
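+	// Path power (presumably intended here as well, since duplicate_path_t copies path_power.power)
+	memcpy(&dst_path->path_power.power, &src_path->path_power.power, sizeof(gdouble));
+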
+	// Path cost
+	duplicate_string(dst_path->path_cost.cost_name, src_path->path_cost.cost_name);
+	memcpy(&dst_path->path_cost.cost_value, &src_path->path_cost.cost_value, sizeof(gdouble));
+	memcpy(&dst_path->path_cost.cost_algorithm, &src_path->path_cost.cost_algorithm, sizeof(gdouble));
+
+	// Path links
+	dst_path->numPathLinks = src_path->numPathLinks;
+	for (gint i = 0; i < dst_path->numPathLinks; i++) {
+		struct pathLink_t* dPathLink = &(dst_path->pathLinks[i]);
+		struct pathLink_t* sPathLink = &(src_path->pathLinks[i]);
+
+		duplicate_string(dPathLink->linkId, sPathLink->linkId);
+		duplicate_string(dPathLink->aDeviceId, sPathLink->aDeviceId);
+		duplicate_string(dPathLink->zDeviceId, sPathLink->zDeviceId);
+		duplicate_string(dPathLink->aEndPointId, sPathLink->aEndPointId);
+		duplicate_string(dPathLink->zEndPointId, sPathLink->zEndPointId);
+
+		duplicate_string(dPathLink->topologyId.contextId, sPathLink->topologyId.contextId);
+		duplicate_string(dPathLink->topologyId.topology_uuid, sPathLink->topologyId.topology_uuid);
+
+		dPathLink->numLinkTopologies = sPathLink->numLinkTopologies;
+		for (gint j = 0; j < dPathLink->numLinkTopologies; j++) {
+			struct linkTopology_t* dLinkTop = &(dPathLink->linkTopologies[j]);
+			struct linkTopology_t* sLinkTop = &(sPathLink->linkTopologies[j]);
+
+			duplicate_string(dLinkTop->topologyId, sLinkTop->topologyId);
+		}
+	}
+	return;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Duplicate the route output instance
+ *
+ * @param dst_ro
+ * @param src_ro
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void duplicate_compRouteOuput(struct compRouteOutput_t* dst_ro, struct compRouteOutput_t* src_ro) {
+	g_assert(dst_ro); g_assert(src_ro); 
+		
+	// Copy the serviceId
+	copy_service_id(&dst_ro->serviceId, &src_ro->serviceId);
+	dst_ro->num_service_endpoints_id = src_ro->num_service_endpoints_id;
+
+	for (gint j = 0; j < dst_ro->num_service_endpoints_id; j++) {
+		struct service_endpoints_id_t* iEp = &(src_ro->service_endpoints_id[j]);
+		struct service_endpoints_id_t* oEp = &(dst_ro->service_endpoints_id[j]);
+		copy_service_endpoint_id(oEp, iEp);
+	}
+
+	// Copy paths
+	dst_ro->numPaths = src_ro->numPaths;
+	for (gint j = 0; j < dst_ro->numPaths; j++) {
+		struct path_t* dst_path = &(dst_ro->paths[j]);
+		struct path_t* src_path = &(src_ro->paths[j]);
+		copy_path(dst_path, src_path);
+	}
+	// copy no path issue value
+	dst_ro->noPathIssue = src_ro->noPathIssue;
+	return;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Duplicate the computation route output list
+ * 
+ * @param dst
+ * @param src
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void duplicate_route_list(struct compRouteOutputList_t* dst, struct compRouteOutputList_t* src) {
+	g_assert(src); g_assert(dst);
+
+	dst->numCompRouteConnList = src->numCompRouteConnList;
+	dst->compRouteOK = src->compRouteOK;
+	memcpy(&dst->compRouteConnAvBandwidth, &src->compRouteConnAvBandwidth, sizeof(gdouble));
+	memcpy(&dst->compRouteConnAvPathLength, &src->compRouteConnAvPathLength, sizeof(gdouble));
+	for (gint i = 0; i < src->numCompRouteConnList; i++) {
+		struct compRouteOutput_t* src_ro = &(src->compRouteConnection[i]);
+		struct compRouteOutput_t* dst_ro = &(dst->compRouteConnection[i]);
+		duplicate_compRouteOuput(dst_ro, src_ro);
+	}	
+	return;
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
@@ -1133,8 +1219,7 @@ struct compRouteOutputList_t * create_route_list ()
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-struct compRouteOutputItem_t *create_path_item ()
-{
+struct compRouteOutputItem_t *create_path_item () {
 	struct compRouteOutputItem_t *p = g_malloc0 (sizeof (struct compRouteOutputItem_t));
 	if (p == NULL) 	{
 		DEBUG_PC ("Memory Allocation Problem");
@@ -1146,71 +1231,86 @@ struct compRouteOutputItem_t *create_path_item ()
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
- * 	@brief Sort the set of paths according to the metric (1st criteria) and latency (2nd criteria)
+ * 	@brief Sort the set of paths by Cost, Power, Latency and AvailBw
  *
  *	@params setP
+ *  @params args
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void sort_path_set(struct path_set_t* setP) {
+void sort_path_set(struct path_set_t* setP, guint args) {
 	g_assert(setP);
-	// Sort the paths contained in setP by shotest metric and latency	
-	float epsilon = 0.0000001;
+	// Sort the paths contained in setP by:
+	// 1st Criteria: The path cost (possibly tied to the link distance)
+	// 2nd Criteria: The consumed path power (applied only with ENERGY_EFFICIENT_ARGUMENT)
+	// 3rd Criteria: The path latency
+	// 4th Criteria: The available Bw
+	float epsilon = 0.1;
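+	// args is a bitmask of optimization flags; with ENERGY_EFFICIENT_ARGUMENT set, power breaks
+	// ties between equal-cost paths, otherwise latency and then available bandwidth decide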
 
 	for (gint i = 0; i < setP->numPaths; i++) {
 		for (gint j = 0; j < (setP->numPaths - i - 1); j++)	{
 			struct compRouteOutputItem_t* path1 = &setP->paths[j];
-			struct compRouteOutputItem_t* path2 = &setP->paths[j + 1];
-			
+			struct compRouteOutputItem_t* path2 = &setP->paths[j + 1];			
 			struct compRouteOutputItem_t* pathTmp = create_path_item();
-			// 1st Criteria (avail Bw)
-			if ((path2->availCap - path1->availCap > 0.0) && (fabs(path1->availCap - path2->availCap) > epsilon)) {
+			//////////////////////// Criterias ////////////////////////////////////////
+			// 1st Criteria (Cost)
+			if (path2->cost < path1->cost) {
 				duplicate_path(path1, pathTmp);
 				duplicate_path(path2, path1);
 				duplicate_path(pathTmp, path2);
 				g_free(pathTmp);
 				continue;
 			}
-			else if ((path1->availCap - path2->availCap > 0.0) && (fabs(path1->availCap - path2->availCap) > epsilon)) {
-				g_free(pathTmp);
-				continue;
-			}
-			// likely the same available bw between path1 and path2
-			else if (fabs(path1->availCap - path2->availCap) < epsilon) {
-				// 2nd criteria: sort path cost
-				if (path1->cost > path2->cost) {
-					duplicate_path(path1, pathTmp);
-					duplicate_path(path2, path1);
-					duplicate_path(pathTmp, path2);
-					g_free(pathTmp);
-					continue;
-				}
-				else if (path1->cost < path2->cost) {
-					g_free(pathTmp);
-					continue;
-				}
-				// 3rd criteria: same path cost, prioritize the one with lowest e2e latency
-				else if (path1->cost == path2->cost) {
-					if ((path2->delay - path1->delay > 0.0) && (fabs(path1->delay - path2->delay) > epsilon)) {
+			if (path2->cost == path1->cost) {
+				// 2nd Criteria (Energy)
+				if (args & ENERGY_EFFICIENT_ARGUMENT) {
+					if (path2->power < path1->power) {
+						duplicate_path(path1, pathTmp);
+						duplicate_path(path2, path1);
+						duplicate_path(pathTmp, path2);
+						g_free(pathTmp);
+						continue;
+					}
+					else {	  // path1->power <= path2->power
 						g_free(pathTmp);
 						continue;
 					}
-					else if ((path1->delay - path2->delay > 0.0) && (fabs(path1->delay - path2->delay) > epsilon)) {
+				}
+				else { // No energy efficient argument
+					// 3rd Criteria (latency)
+					if (path2->delay < path1->delay) {
 						duplicate_path(path1, pathTmp);
 						duplicate_path(path2, path1);
 						duplicate_path(pathTmp, path2);
 						g_free(pathTmp);
 						continue;
 					}
-					// Same bw, same cost and same latency, path1 and path2 are practically the same
-					else if (fabs(path1->delay - path2->delay) < epsilon) {
+					else if (path1->delay < path2->delay) {
 						g_free(pathTmp);
 						continue;
 					}
+					else { // path1->delay == path2->delay
+						// 4th Criteria (available bw)
+						if (path2->availCap > path1->availCap) {
+							duplicate_path(path1, pathTmp);
+							duplicate_path(path2, path1);
+							duplicate_path(pathTmp, path2);
+							g_free(pathTmp);
+							continue;
+						}
+						else {
+							g_free(pathTmp);
+							continue;
+						}
+					}
 				}
-			}			
+			}
+			else {	// path1->cost < path2->cost
+				g_free(pathTmp);
+				continue;
+			}				
 		}
 	}
 	return;
@@ -1249,8 +1349,7 @@ void pop_front_path_set (struct path_set_t *setP) {
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void add_routeElement_path_back (struct routeElement_t *rE, struct compRouteOutputItem_t *p)
-{
+void add_routeElement_path_back (struct routeElement_t *rE, struct compRouteOutputItem_t *p) {
 	//DEBUG_PC ("p->numRouteElements: %d", p->numRouteElements);
 	p->numRouteElements++;
 	gint index = p->numRouteElements - 1;
@@ -1268,7 +1367,6 @@ void add_routeElement_path_back (struct routeElement_t *rE, struct compRouteOutp
 	duplicate_string(p->routeElement[index].linkId, rE->linkId);
 	duplicate_string(p->routeElement[index].aTopologyId, rE->aTopologyId);
 	duplicate_string(p->routeElement[index].zTopologyId, rE->zTopologyId);
-
 	return;
 }
 
@@ -1332,21 +1430,19 @@ gboolean matching_path_rootPath (struct compRouteOutputItem_t *ap, struct compRo
 /////////////////////////////////////////////////////////////////////////////////////////
 void modify_targeted_graph (struct graph_t *g, struct path_set_t *A, struct compRouteOutputItem_t * rootPath, struct nodes_t * spurNode) {
 	//DEBUG_PC ("Modify the Targeted graph according to the Yen algorithm principles");
-	for (gint j = 0; j < A->numPaths; j++)
-	{
+	for (gint j = 0; j < A->numPaths; j++) {
 		struct compRouteOutputItem_t *ap = &A->paths[j];
-		struct edges_t *e = create_edge ();
+		struct edges_t *e = create_edge();
 		gboolean ret =  FALSE;
 		ret = matching_path_rootPath (ap, rootPath, spurNode, e);		
 		if (ret == TRUE) {
-			//DEBUG_PC ("Removal %s [%u]--> %s [%u] from the graph", e->aNodeId.nodeId, e->aLinkId, e->zNodeId.nodeId, e->zLinkId);
+			DEBUG_PC ("Removal %s[%s] --> %s[%s] from the graph", e->aNodeId.nodeId, e->aEndPointId, e->zNodeId.nodeId, e->aEndPointId);
 			remove_edge_from_graph (g, e);
 			//DEBUG_PC ("Print Resulting Graph");
-			//print_graph (g);
+			print_graph (g);
 			g_free (e);			
 		}
-		if (ret == FALSE)
-		{
+		if (ret == FALSE) {
 			g_free (e);
 			continue;
 		}						
@@ -1399,54 +1495,45 @@ gint find_nodeId (gconstpointer data, gconstpointer userdata)
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struct graph_t *g, 
-				struct service_t *s, GList **S, GList **Q, struct map_nodes_t *mapNodes) { 
-	g_assert(g);
-	g_assert(s);
-	g_assert(mapNodes);
+				struct service_t *s, GList **S, GList **Q, struct map_nodes_t *mapNodes, 
+				guint arg) { 
+	g_assert(g); g_assert(s); g_assert(mapNodes);
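+	// arg: optimization bitmask forwarded by the caller (e.g. ENERGY_EFFICIENT_ARGUMENT), assumed to steer the checks below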
 
 	struct targetNodes_t *v = &(g->vertices[indexGraphU].targetedVertices[indexGraphV]);	
-    DEBUG_PC("Explored link (u ===> v):");
-	DEBUG_PC("u: %s ====>", u->node.nodeId);
-	DEBUG_PC("====> v: %s", v->tVertice.nodeId);
+    DEBUG_PC("Explored Link %s => %s)", u->node.nodeId, v->tVertice.nodeId);
+	//DEBUG_PC("\t %s => %s", u->node.nodeId, v->tVertice.nodeId);	
     
     // v already explored in S? then, discard it
     GList *found = g_list_find_custom (*S, v->tVertice.nodeId, find_nodeId);
     if (found != NULL) {
-        DEBUG_PC ("v (%s) in S, discard to explore it!", v->tVertice.nodeId);        
+        DEBUG_PC ("v (%s) in S, Discard", v->tVertice.nodeId);        
         return 0;
     }
 
 	// Get the set of constraints imposed by the service
 	struct path_constraints_t* path_constraints = get_path_constraints(s);
-    gdouble distance_through_u = INFINITY_COST;
-    gdouble latency_through_u = INFINITY_COST;	
-	gint i = 0;
-
-    // Check bandwidth requirement is fulfillied on edge u --> v    
-    gint foundAvailBw = 0;
-    gdouble edgeAvailBw = 0.0;
+    gdouble distance_through_u = INFINITY_COST, latency_through_u = INFINITY_COST, power_through_u = INFINITY_COST;
+	gint i = 0, foundAvailBw = 0;
+    // BANDWIDTH requirement to be fulfilled on EDGE u->v        
+    gdouble edgeAvailBw = 0.0, edgeTotalBw = 0.0;
     for (i = 0; i < v->numEdges; i++) {        
         struct edges_t *e = &(v->edges[i]);
 		memcpy (&edgeAvailBw, &(e->availCap), sizeof (gdouble));
-		DEBUG_PC("edge:u ===> v");
-        DEBUG_PC ("%s[%s] ===>", u->node.nodeId, e->aEndPointId);
-		DEBUG_PC("====> %s[%s]", v->tVertice.nodeId, e->zEndPointId);
-		DEBUG_PC("edge available bw: %f", edgeAvailBw);
-
-        // if network service constraint specifies "bandwidth" needs (assuming coherent units)
-		if (path_constraints->bw == TRUE) {
-			if (edgeAvailBw < path_constraints->bwConstraint) {
-				continue;
-			}
-			else {
-				foundAvailBw = 1;
-				break;
-			}
+		memcpy(&edgeTotalBw, &(e->totalCap), sizeof(gdouble));
+		DEBUG_PC("EDGE %s[%s] => %s[%s]", u->node.nodeId, e->aEndPointId, v->tVertice.nodeId, e->zEndPointId);
+        //DEBUG_PC ("\t %s[%s] =>", u->node.nodeId, e->aEndPointId);
+		//DEBUG_PC("\t => %s[%s]", v->tVertice.nodeId, e->zEndPointId);
+		DEBUG_PC("\t AvailBw: %f, TotalBw: %f", edgeAvailBw, edgeTotalBw);
+        // Check Service Bw constraint
+		if ((path_constraints->bw == TRUE) && (edgeAvailBw < path_constraints->bwConstraint))
+			continue;
+		else {
+			foundAvailBw = 1;
+			break;
 		}		
     }
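+	// Note: when no bandwidth constraint is requested, the else branch above sets
+	// foundAvailBw and breaks on the very first edge between u and v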
-	// if bw path constraint is specified but not sastified ...	discard the edge
-    if ((path_constraints->bw == TRUE) && (foundAvailBw == 0))
-    {
+	// BW constraint NOT MET, then DISCARD edge
+    if ((path_constraints->bw == TRUE) && (foundAvailBw == 0)) {
         DEBUG_PC ("AvailBw: %f < path_constraint: %f -- Discard Edge", edgeAvailBw, path_constraints->bwConstraint);
 		g_free(path_constraints);
         return 0;    
@@ -1457,7 +1544,12 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
     gint map_uIndex = get_map_index_by_nodeId (u->node.nodeId, mapNodes);
 	struct map_t *u_map = &mapNodes->map[map_uIndex];
     distance_through_u = u_map->distance + v->edges[indexEdge].cost;
-    latency_through_u = u_map->latency + v->edges[indexEdge].delay;    
+    latency_through_u = u_map->latency + v->edges[indexEdge].delay;
+	// Consumed power at v through u is the sum
+	// 1. Power from src to u
+	// 2. Power-idle at node u
+	// 3. power consumed over the edge between u and v, i.e. energy*usedBw
+	power_through_u = u_map->power + g->vertices[indexGraphU].power_idle + ((edgeTotalBw - edgeAvailBw + path_constraints->bwConstraint) * (v->edges[indexEdge].energy));
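+	// Worked example (illustrative figures, not taken from the source): with power(src->u) = 10 W,
+	// power_idle(u) = 2 W, edgeTotalBw = 100, edgeAvailBw = 60, bwConstraint = 10 and
+	// energy = 0.05 W per bandwidth unit: power_through_u = 10 + 2 + (100 - 60 + 10) * 0.05 = 14.5 W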
     gdouble availBw_through_u = 0.0;
 
 	// ingress endpoint (u) is the src of the request
@@ -1476,7 +1568,7 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
 			memcpy (&availBw_through_u, &edgeAvailBw, sizeof (gdouble));
 		} 
     }     
-    // Relax the link according to the pathCost and latency
+    // Relax the link according to the pathCost, latency, and energy
     gint map_vIndex = get_map_index_by_nodeId (v->tVertice.nodeId, mapNodes);
 	struct map_t *v_map = &mapNodes->map[map_vIndex];
     // If cost dist (u, v) > dist (src, v) relax the link
@@ -1484,17 +1576,35 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
         //DEBUG_PC ("dist(src, u) + dist(u, v): %f > dist (src, v): %f --> Discard Link", distance_through_u, v_map->distance);  
         return 0;
     }
-    // If dist (src, u) + dist (u, v) = current dist(src, v), then use the latency as discarding criteria
-    if ((distance_through_u == v_map->distance) && (latency_through_u > v_map->latency)) {
-        //DEBUG_PC ("dist(src, u) + dist(u,v) = current dist(src, v), but latency (src,u) + latency (u, v) > current latency (src, v)");          
-        return 0;
-    }	
-	// If dist (src, u) + dist (u,v) == current dist(src, v) AND latency (src, u) + latency (u, v) == current latency (src, v), the available bandwidth is the criteria
-	if ((distance_through_u ==  v_map->distance) && (latency_through_u == v_map->latency) && (availBw_through_u < v_map->avaiBandwidth)) {
-		return 0;
-	}    
+	// If energy consumption optimization is requested
+	if (arg & ENERGY_EFFICIENT_ARGUMENT) {
+		if (distance_through_u == v_map->distance) {
+			if (power_through_u > v_map->power) {
+				DEBUG_PC("Energy (src -> u + u -> v: %f (Watts) >Energy (src, v): %f (Watts)--> DISCARD LINK", power_through_u, v_map->power);
+				return 0;
+			}
+			// same energy consumption, consider latency
+			if ((power_through_u == v_map->power) && (latency_through_u > v_map->latency)) {
+				return 0;
+			}
+			if ((power_through_u == v_map->power) && (latency_through_u == v_map->latency) && (availBw_through_u < v_map->avaiBandwidth)) {
+				return 0;
+			}
+		}
+	} // No optimization, rely on latency and available e2e bandwidth
+	else {
+		// If dist (src, u) + dist (u, v) = current dist(src, v), then use the latency as discarding criteria
+		if ((distance_through_u == v_map->distance) && (latency_through_u > v_map->latency)) {
+			//DEBUG_PC ("dist(src, u) + dist(u,v) = current dist(src, v), but latency (src,u) + latency (u, v) > current latency (src, v)");          
+			return 0;
+		}
+		// If dist (src, u) + dist (u,v) == current dist(src, v) AND latency (src, u) + latency (u, v) == current latency (src, v), the available bandwidth is the criteria
+		if ((distance_through_u == v_map->distance) && (latency_through_u == v_map->latency) && (availBw_through_u < v_map->avaiBandwidth)) {
+			return 0;
+		}
+	}
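+	// Tie-break order on equal path cost: energy-efficient mode uses power, then latency,
+	// then available bandwidth; the default mode uses latency, then available bandwidth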
     DEBUG_PC ("%s --> %s Relaxed", u->node.nodeId, v->tVertice.nodeId);
-    DEBUG_PC ("\t AvailBw: %f Mb/s, Cost: %f, Latency: %f ms", availBw_through_u, distance_through_u, latency_through_u);
+    DEBUG_PC ("\t AvailBw: %f Mb/s, Cost: %f, Latency: %f ms, Energy: %f Watts", availBw_through_u, distance_through_u, latency_through_u, power_through_u);
     
     // Update Q list -- 
     struct nodeItem_t *nodeItem = g_malloc0 (sizeof (struct nodeItem_t));
@@ -1505,26 +1615,31 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
     nodeItem->distance = distance_through_u;
 	memcpy(&nodeItem->distance, &distance_through_u, sizeof(gdouble));		     
 	memcpy(&nodeItem->latency, &latency_through_u, sizeof(gdouble));
+	memcpy(&nodeItem->power, &power_through_u, sizeof(gdouble));
 	duplicate_node_id (&v->tVertice, &nodeItem->node);	
 	// add node to the Q list
-    *Q = g_list_insert_sorted (*Q, nodeItem, sort_by_distance);
-    //DEBUG_PC ("%s ADDED to Q (length: %d)", nodeItem->node.nodeId, g_list_length(*Q));    
+	if (arg & ENERGY_EFFICIENT_ARGUMENT) {
+		*Q = g_list_insert_sorted(*Q, nodeItem, sort_by_energy);
+	}
+	else
+		*Q = g_list_insert_sorted (*Q, nodeItem, sort_by_distance);
     
-    // Update the mapNodes for the specific reached tv   
+	// Update the mapNodes for the specific reached tv   
     v_map->distance = distance_through_u;
 	memcpy(&v_map->distance, &distance_through_u, sizeof(gdouble));
     memcpy (&v_map->avaiBandwidth, &availBw_through_u, sizeof (gdouble));
     memcpy (&v_map->latency, &latency_through_u, sizeof (gdouble));
+	memcpy(&v_map->power, &power_through_u, sizeof(gdouble));
     // Duplicate the predecessor edge into the mapNodes 
 	struct edges_t *e1 = &(v_map->predecessor);
 	struct edges_t *e2 = &(v->edges[indexEdge]);
-	duplicate_edge (e1, e2);	
+	duplicate_edge(e1, e2);	
 	DEBUG_PC ("u->v Edge: %s(%s) --> %s(%s)", e2->aNodeId.nodeId, e2->aEndPointId, e2->zNodeId.nodeId, e2->zEndPointId);
-	DEBUG_PC("v-pred aTopology: %s", e2->aTopologyId);
+	//DEBUG_PC("v-pred aTopology: %s", e2->aTopologyId);
 	DEBUG_PC("v-pred zTopology: %s", e2->zTopologyId);
 
     // Check whether v is dstPEId
-	//DEBUG_PC ("Targeted dstPEId: %s", req->dstPEId.nodeId);
+	//DEBUG_PC ("Targeted dstId: %s", s->service_endpoints_id[1].device_uuid);
 	//DEBUG_PC ("nodeId added to the map: %s", v_map->verticeId.nodeId);
 	//DEBUG_PC ("Q Length: %d", g_list_length(*Q));
 	g_free(path_constraints);
@@ -1547,7 +1662,6 @@ gboolean check_computed_path_feasability (struct service_t *s, struct compRouteO
 	float epsilon = 0.0000001;
 	struct path_constraints_t* pathCons = get_path_constraints(s);
 	gboolean ret = TRUE;
-
 	if (pathCons->latency == TRUE) {
 		if ((pathCons->latencyConstraint - p->delay > 0.0) || (fabs(pathCons->latencyConstraint - p->delay) < epsilon)) {
 			DEBUG_PC("Computed Path (latency: %f) is feasible wrt Connection Demand: %f", p->delay, pathCons->latencyConstraint);
@@ -1558,8 +1672,7 @@ gboolean check_computed_path_feasability (struct service_t *s, struct compRouteO
 			return FALSE;
 		}
 	}
-	// Other constraints...
-	
+	// Other constraints...		
 	g_free(pathCons);
 	return ret;
 }
@@ -1569,12 +1682,13 @@ gboolean check_computed_path_feasability (struct service_t *s, struct compRouteO
  * 	@file pathComp_tools.c
  * 	@brief Sorting the GList Q items by distance 
  * 
+ * @param a
+ * @param b
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gint sort_by_distance (gconstpointer a, gconstpointer b)
-{
+gint sort_by_distance (gconstpointer a, gconstpointer b) {
 	//DEBUG_PC ("sort by distance a and b");	
 	g_assert(a != NULL);
 	g_assert(b != NULL);
@@ -1592,13 +1706,55 @@ gint sort_by_distance (gconstpointer a, gconstpointer b)
 		return 1;
 	else if (node1->distance < node2->distance)
 		return 0;
-	if (node1->distance == node2->distance)
-	{
+	if (node1->distance == node2->distance) {
 		if (node1->latency > node2->latency)
 			return 1;
 		else if (node1->latency <= node2->latency)
 			return 0;
 	}
+	return 0;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Sorting the GList Q items by distance, energy (power) and latency
+ * 
+ * @param a
+ * @param b
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+gint sort_by_energy(gconstpointer a, gconstpointer b) {	
+	g_assert(a != NULL);
+	g_assert(b != NULL);
+
+	//DEBUG_PC ("sort by distance a and b");	  
+	struct nodeItem_t* node1 = (struct nodeItem_t*)a;
+	struct nodeItem_t* node2 = (struct nodeItem_t*)b;
+	g_assert(node1);
+	g_assert(node2);
+	
+	//1st criteria: sorting by lowest distance
+	if (node1->distance > node2->distance)
+		return 1;
+	if (node1->distance < node2->distance)
+		return 0;
+
+	// 2nd Criteria: sorting by the lowest energy
+	if (node1->power > node2->power)
+		return 1;
+	if (node1->power < node2->power)
+		return 0;
+
+	// 3rd Criteria: by the latency 
+	if (node1->latency > node2->latency)
+		return 1;
+	if (node1->latency <= node2->latency)
+		return 0;
+	return 0;
 }
 
 ////////////////////////////////////////////////////////////////////////////////////////
@@ -1613,8 +1769,7 @@ gint sort_by_distance (gconstpointer a, gconstpointer b)
 /////////////////////////////////////////////////////////////////////////////////////////
 struct graph_t * create_graph () {
 	struct graph_t * g = g_malloc0 (sizeof (struct graph_t));
-	if (g == NULL)
-	{
+	if (g == NULL) {
 		DEBUG_PC ("Memory Allocation Problem");
 		exit (-1);
 	}
@@ -1633,8 +1788,7 @@ struct graph_t * create_graph () {
 /////////////////////////////////////////////////////////////////////////////////////////
 struct map_nodes_t * create_map_node ()	 {
 	struct map_nodes_t * mN = g_malloc0 (sizeof (struct map_nodes_t));
-	if (mN == NULL)
-	{
+	if (mN == NULL) {
 		DEBUG_PC ("Memory allocation failed");
 		exit (-1);
 	}
@@ -1652,78 +1806,18 @@ struct map_nodes_t * create_map_node ()	 {
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-
 struct service_t* get_service_for_computed_path(gchar* serviceUUID) {
-	for (gint i = 0; i < serviceList->numServiceList; i++) {
-		struct service_t* s = &(serviceList->services[i]);
-		if (strcmp(s->serviceId.service_uuid, serviceUUID) == 0)
-			return s;
+	gint i = 0;
+	for(GList *listnode = g_list_first(serviceList);
+		listnode;
+		listnode = g_list_next(listnode), i++) {
+			struct service_t* s = (struct service_t*)(listnode->data);
+			if (strcmp(s->serviceId.service_uuid, serviceUUID) == 0)
+				return s;
 	}
 	return NULL;
 }
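+// The *List globals (serviceList, deviceList, linkList, contextSet, activeServList) are now
+// plain GLists of heap-allocated structs; the canonical traversal used throughout this file is:
+//   for (GList* ln = g_list_first(list); ln; ln = g_list_next(ln)) {
+//       struct service_t* s = (struct service_t*)(ln->data);
+//       ...
+//   }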
 
-////////////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_tools.c
- * 	@brief Allocate memory for struct deviceList_t
- *
- *
- * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-struct deviceList_t* create_device_list()
-{
-	struct deviceList_t* dList = g_malloc0(sizeof(struct deviceList_t));
-	if (dList == NULL)
-	{
-		DEBUG_PC("Memory Allocation Failure");
-		exit(-1);
-	}
-	return dList;
-}
-
-////////////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_tools.c
- * 	@brief Allocate memory for struct linkList_t
- *
- *
- * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-struct linkList_t* create_link_list() {
-	struct linkList_t* lList = g_malloc0(sizeof(struct linkList_t));
-	if (lList == NULL)
-	{
-		DEBUG_PC("Memory Allocation Failure");
-		exit(-1);
-	}
-	lList->numLinks = 0;
-	return lList;
-}
-
-////////////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_tools.c
- * 	@brief Allocate memory for struct serviceList_t
- *
- *
- * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-struct serviceList_t* create_service_list() {
-	struct serviceList_t* sList = g_malloc0(sizeof(struct serviceList_t));
-	if (sList == NULL)
-	{
-		DEBUG_PC("Memory Allocation Failure");
-		exit(-1);
-	}
-	return sList;
-}
-
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
@@ -1766,8 +1860,7 @@ void print_service_type(guint type) {
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void print_link_port_direction(guint direction)
-{
+void print_link_port_direction(guint direction) {
 	switch (direction) {
 		case LINK_PORT_DIRECTION_BIDIRECTIONAL:
 			//DEBUG_PC("Bidirectional Port Direction");
@@ -1796,8 +1889,7 @@ void print_link_port_direction(guint direction)
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void print_termination_direction(guint direction)
-{
+void print_termination_direction(guint direction) {
 	switch (direction) {
 	case TERMINATION_DIRECTION_BIDIRECTIONAL:
 		//DEBUG_PC("Bidirectional Termination Direction");
@@ -1922,26 +2014,6 @@ void print_link_forwarding_direction(guint linkFwDir) {
 	return;
 }
 
-////////////////////////////////////////////////////////////////////////////////////////
-/**
- * 	@file pathComp_tools.c
- * 	@brief Allocate memory for the contextSet
- *
- * @param 
- *
- * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2022
- */
- /////////////////////////////////////////////////////////////////////////////////////////
-struct contextSet_t* create_contextSet() {
-	struct contextSet_t* c = g_malloc0(sizeof(struct contextSet_t));
-	if (c == NULL) {
-		DEBUG_PC("Memory Allocation Failure");
-		exit(-1);
-	}
-	return c;
-}
-
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
@@ -1954,18 +2026,19 @@ struct contextSet_t* create_contextSet() {
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct context_t* find_contextId_in_set(gchar* contextUuid, struct contextSet_t* set) {
-
-	g_assert(set);
-	//DEBUG_PC("Checking if contextId: %s in in the ContextList??", contextUuid);
-
-	for (gint i = 0; i < set->num_context_set; i++) { 	
-		struct context_t* c = &(set->contextList[i]);
+struct context_t* find_contextId_in_set(gchar* contextUuid, GList** set) {
+	//DEBUG_PC("Checking if contextId: %s in in the ContextSet??", contextUuid);
+	gint i = 0;
+	for (GList *ln = g_list_first(*set);
+		ln;
+		ln = g_list_next(ln)){
+		struct context_t* c = (struct context_t*)(ln->data);
 		//DEBUG_PC("Context Item [%d] Id: %s", i, c->contextId);
 		if (strcmp(contextUuid, c->contextId) == 0) {
 			//DEBUG_PC("contextId: %s is FOUND in the ContextSet_List", contextUuid);
 			return c;
 		}
+		i++;
 	}
 	//DEBUG_PC("contextId: %s NOT FOUND in the ContextSet_List", contextUuid);
 	return NULL;
@@ -1983,11 +2056,19 @@ struct context_t* find_contextId_in_set(gchar* contextUuid, struct contextSet_t*
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct context_t* add_contextId_in_set(gchar *contextUuid, struct contextSet_t *set) {
+struct context_t* add_contextId_in_set(gchar *contextUuid, GList** set) {
 
-	set->num_context_set++;
-	struct context_t* c = &(set->contextList[set->num_context_set - 1]);
+	struct context_t* c = g_malloc0(sizeof(struct context_t));
+	if (c == NULL) {
+		DEBUG_PC("Memory Allocation Failure");
+		exit(-1);
+	}
 	duplicate_string(c->contextId, contextUuid);
+	// Add the context into the context set
+	//DEBUG_PC("Adding ContextId: %s", contextUuid);
+	//DEBUG_PC(" (BEFORE ADDING) Context Set Length: %d", g_list_length(*set));
+	*set = g_list_append(*set, c);
+	//DEBUG_PC(" (AFTER ADDING) Context Set Length: %d", g_list_length(*set));
 	return c;
 }
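+// Note: the returned context is heap-allocated and owned by the set; the intended release
+// path is destroy_context() (defined further below), e.g. via g_list_free_full() on the set.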
 
@@ -2004,9 +2085,7 @@ struct context_t* add_contextId_in_set(gchar *contextUuid, struct contextSet_t *
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 struct vertices_t* find_vertex_in_graph_context(struct graph_t *g, gchar* deviceId) {
-
-	for (gint i = 0; i < g->numVertices; i++)
-	{
+	for (gint i = 0; i < g->numVertices; i++) {
 		struct vertices_t* v = &(g->vertices[i]);
 		if (strcmp(v->verticeId.nodeId, deviceId) == 0) {
 			return v;
@@ -2027,10 +2106,11 @@ struct vertices_t* find_vertex_in_graph_context(struct graph_t *g, gchar* device
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct vertices_t* add_vertex_in_graph(struct graph_t* g, gchar* deviceId) {
+struct vertices_t* add_vertex_in_graph(struct graph_t* g, struct device_t *d) {
 	g->numVertices++;
 	struct vertices_t* v = &(g->vertices[g->numVertices - 1]);
-	duplicate_string(v->verticeId.nodeId, deviceId);
+	duplicate_string(v->verticeId.nodeId, d->deviceId);
+	memcpy(&v->power_idle, &d->power_idle, sizeof(gdouble));
 	return v;
 }
 
@@ -2040,17 +2120,24 @@ struct vertices_t* add_vertex_in_graph(struct graph_t* g, gchar* deviceId) {
  * 	@brief Construct the graphs (vertices and edges) bound to every individual context
  *
  * @param cSet
+ * @param activeFlag
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-void build_contextSet_deviceList(struct contextSet_t* cSet) {
-	// Check every device their endpoints
-	for (gint i = 0; i < deviceList->numDevices; i++) {
-		struct device_t* d = &(deviceList->devices[i]);
+void build_contextSet_deviceList(GList** cSet, gint activeFlag) {
+	// Check every device their endpoints	
+	for (GList* listnode = g_list_first(deviceList); 
+		listnode; 
+		listnode = g_list_next(listnode)) {	
+		struct device_t* d = (struct device_t*)(listnode->data);
 		//DEBUG_PC("Exploring DeviceId: %s", d->deviceId);
 
+		if ((activeFlag == 1) && (d->operational_status != 2)) {
+			// it is only considered devices with operational status enabled, i.e., set to 2
+			continue;
+		}
 		// Check the associated endPoints
 		for (gint j = 0; j < d->numEndPoints; j++) {
 			struct endPoint_t* eP = &(d->endPoints[j]);
@@ -2058,18 +2145,17 @@ void build_contextSet_deviceList(struct contextSet_t* cSet) {
 			struct endPointId_t* ePid = &(eP->endPointId);  //end point id
 			//DEBUG_PC("   EndPointId: %s || Type: %s", eP->endPointId.endpoint_uuid, d->deviceType);
 			//DEBUG_PC("   TopologyId: %s || ContextId: %s", eP->endPointId.topology_id.topology_uuid, eP->endPointId.topology_id.contextId);
-
 			// Add contextId in ContextSet and the deviceId (+endpoint) into the vertex set
 			struct context_t *c = find_contextId_in_set(eP->endPointId.topology_id.contextId, cSet);
 			if (c == NULL) {
-				//DEBUG_PC("   contextUuid: %s MUST BE ADDED to ContextSet", eP->endPointId.topology_id.contextId);
+				DEBUG_PC("   contextUuid: %s MUST BE ADDED to ContextSet", eP->endPointId.topology_id.contextId);
 				c = add_contextId_in_set(eP->endPointId.topology_id.contextId, cSet);
 			}
 			// Check if the deviceId and endPointUuid are already considered in the graph of the context c
 			struct vertices_t* v = find_vertex_in_graph_context(&c->g, d->deviceId);
 			if (v == NULL) {
 				//DEBUG_PC("  deviceId: %s MUST BE ADDED to the Context Graph", d->deviceId);
-				v = add_vertex_in_graph(&c->g, d->deviceId);
+				v = add_vertex_in_graph(&c->g, d);
 			}
 		}
 	}
@@ -2132,8 +2218,10 @@ struct targetNodes_t* add_targeted_vertex_in_graph_context(struct vertices_t* v,
  /////////////////////////////////////////////////////////////////////////////////////////
 struct endPoint_t* find_device_tied_endpoint(gchar* devId, gchar* endPointUuid) {
 	//DEBUG_PC("devId: %s ePId: %s", devId, endPointUuid);
-	for (gint i = 0; i < deviceList->numDevices; i++) {
-		struct device_t* d = &(deviceList->devices[i]);
+	for (GList* ln = g_list_first(deviceList);
+		ln;
+		ln = g_list_next(ln)) {
+		struct device_t* d = (struct device_t*)(ln->data);
 		if (strcmp(d->deviceId, devId) != 0) {
 			continue;
 		}
@@ -2156,51 +2244,65 @@ struct endPoint_t* find_device_tied_endpoint(gchar* devId, gchar* endPointUuid)
  *
  * @param w
  * @param l
+ * @param activeFlag
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void add_edge_in_targetedVertice_set(struct targetNodes_t* w, struct link_t* l) {
+void add_edge_in_targetedVertice_set(struct targetNodes_t* w, struct link_t* l, gint activeFlag) {
 	//DEBUG_PC("\t targetedVertex: %s", w->tVertice.nodeId);
+
+	// If activeFlag is 1, the link is only added as an edge when its associated
+	// endPoint is ENABLED, i.e., with operational status set to 2
+	// Get the endpoints (A and Z) of the link l (assumed P2P)
+	struct link_endpointId_t* aEndpointId = &(l->linkEndPointId[0]);
+	struct link_endpointId_t* zEndpointId = &(l->linkEndPointId[1]);
+	// Get the endPoint Information tied to the device bound to aEndPointId
+	struct endPoint_t* eP = find_device_tied_endpoint(aEndpointId->deviceId, aEndpointId->endPointId);
+	if (eP == NULL) {
+		DEBUG_PC("devId: %s endPointUuid: %s NOT in Device List!!--- Weird", aEndpointId->deviceId, aEndpointId->endPointId);
+		exit(-1);
+	}
+	// Check whether the port in that endPoint (eP) is Active upon the activeFlag being SET
+	if (activeFlag == 1) {
+		if (eP->operational_status != 2) // NOT ENABLED, then discard this link
+			return;
+	}
+
+	// Add the edge into the graph
 	w->numEdges++;
 	struct edges_t* e = &(w->edges[w->numEdges - 1]);
 	// Copy the link Id UUID
 	duplicate_string(e->linkId, l->linkId);
-
-	// copy the deviceId and endpointsIds (A --> Z)
-	struct link_endpointId_t* aEndpointId = &(l->linkEndPointId[0]);
 	duplicate_string(e->aNodeId.nodeId, aEndpointId->deviceId);
 	duplicate_string(e->aEndPointId, aEndpointId->endPointId);
-	duplicate_string(e->aTopologyId, aEndpointId->topology_id.topology_uuid);
-
-	struct link_endpointId_t* zEndpointId = &(l->linkEndPointId[1]);
+	duplicate_string(e->aTopologyId, aEndpointId->topology_id.topology_uuid);	
 	duplicate_string(e->zNodeId.nodeId, zEndpointId->deviceId);
 	duplicate_string(e->zEndPointId, zEndpointId->endPointId);
 	duplicate_string(e->zTopologyId, zEndpointId->topology_id.topology_uuid);
-
-	// The potential and available capacity is indeed retrieved using aEndpointId in the deviceList
-	struct endPoint_t* eP = find_device_tied_endpoint(aEndpointId->deviceId, aEndpointId->endPointId);
-	if (eP == NULL) {
-		DEBUG_PC("devId: %s endPointUuid: %s NOT in Device List!!--- Weird", aEndpointId->deviceId, aEndpointId->endPointId);
-		exit(-1);
-	}
+	
 	//Potential(total) and available capacity
 	e->unit = eP->potential_capacity.unit;
 	memcpy(&e->totalCap, &eP->potential_capacity.value, sizeof(gdouble));
 	memcpy(&e->availCap, &eP->available_capacity.value, sizeof(gdouble));
-
 	// Copy interdomain local/remote Ids
 	memcpy(e->interDomain_localId, eP->inter_domain_plug_in.inter_domain_plug_in_local_id, 
 		strlen(eP->inter_domain_plug_in.inter_domain_plug_in_local_id));
 	memcpy(e->interDomain_remoteId, eP->inter_domain_plug_in.inter_domain_plug_in_remote_id,
 		strlen(eP->inter_domain_plug_in.inter_domain_plug_in_remote_id));
-
 	// cost value
 	memcpy(&e->cost, &l->cost_characteristics.cost_value, sizeof(gdouble));
-
-	// latency
+	// latency ms
 	memcpy(&e->delay, &l->latency_characteristics.fixed_latency, sizeof(gdouble));
+	// energy J/bits ~ power
+	memcpy(&e->energy, &eP->energyConsumption, sizeof(gfloat));
+	
+	//DEBUG_PC("Edge - Total/Available Capacity: %f/%f; Cost: %f; Delay: %f, Energy: %f", eP->potential_capacity.value, eP->available_capacity.value,
+	//	l->cost_characteristics.cost_value, l->latency_characteristics.fixed_latency, l->energy_link);
+
+	//DEBUG_PC("Graph Edge - Total/Available Capacity: %f/%f; Cost: %f; Delay: %f, Energy: %f", e->totalCap, e->availCap,
+	//	e->cost, e->delay, e->energy);
 	return;
 }
 
@@ -2216,8 +2318,7 @@ void add_edge_in_targetedVertice_set(struct targetNodes_t* w, struct link_t* l)
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-struct edges_t* find_edge_in_targetedVertice_set(struct targetNodes_t* w, struct link_t* l) {
-		
+struct edges_t* find_edge_in_targetedVertice_set(struct targetNodes_t* w, struct link_t* l) {		
 	for (gint i = 0; i < w->numEdges; i++) {
 		struct edges_t* e = &(w->edges[i]);
 		if (strcmp(e->linkId, l->linkId) == 0) {
@@ -2234,32 +2335,37 @@ struct edges_t* find_edge_in_targetedVertice_set(struct targetNodes_t* w, struct
  * contents/info of the link list
  *
  * @param set
+ * @param activeFlag
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void build_contextSet_linklList(struct contextSet_t* set) {
-	g_assert(set);
-	
+void build_contextSet_linklList(GList** set, gint activeFlag) {	
 	// for each link in linkList:
 	// 1st- Retrieve endpoints A --> B feauture (context Id, device Id, endpoint Id)
 	// 2st - In the graph associated to the contextId, check wheter A (deviceId) is in the vertices list
 	// o No, this is weird ... exist
 	// o Yes, get the other link endpoint (i.e., B) and check whether it exists. If NOT add it, considering
 	// all the attributes; Otherwise, check whether the link is different from existing edges between A and B
+	gdouble epsilon = 0.1;
+	gint j = 0;
+	for (GList* ln = g_list_first(linkList);
+		ln;
+		ln = g_list_next(ln)) {
+		struct link_t* l = (struct link_t*)(ln->data);
+		j++;
 
-	for (gint j = 0; j < linkList->numLinks; j++) {
-		struct link_t* l = &(linkList->links[j]);
-		// link assumed to be P2P A --> B; I.e. 2 endPoints; 1st specifies A and 2nd specifie B
+		// link assumed to be P2P A --> B; i.e. 2 endPoints; 1st specifies A and 2nd specifies B
 		struct link_endpointId_t* aEndpointId = &(l->linkEndPointId[0]);
 		struct topology_id_t* topologyId = &(aEndpointId->topology_id);
 		// get the contextId
 		gchar contextUuid[UUID_CHAR_LENGTH];
 		duplicate_string(contextUuid, topologyId->contextId);
-		//DEBUG_PC("Link: %s in Context: %s", l->linkId, contextUuid);
+		DEBUG_PC("Link: %s in ContextId: %s", l->linkId, contextUuid);
 
 		// Check first contextUuid exists in the cSet
+		//DEBUG_PC("Length of Context: %d", g_list_length(set));
 		struct context_t* c = find_contextId_in_set(contextUuid, set);
 		if (c == NULL) {
 			DEBUG_PC("ContextId: %s does NOT exist... weird", contextUuid);
@@ -2273,28 +2379,29 @@ void build_contextSet_linklList(struct contextSet_t* set) {
 		struct graph_t* g = &(c->g); // get the graph associated to the context c
 		struct vertices_t* v = find_vertex_in_graph_context(g, aDeviceId);
 		if (v == NULL) {
-			DEBUG_PC("aDeviceId: %s IS NOT IN Vertices of contextId: %s", aDeviceId, contextUuid);
+			DEBUG_PC("%s NOT a VERTEX of contextId: %s ... WEIRD", aDeviceId, contextUuid);
 			exit(-1);
 		}		
 		// get the bEndpointId
 		struct link_endpointId_t* bEndpointId = &(l->linkEndPointId[1]);
 		gchar bDeviceId[UUID_CHAR_LENGTH];
 		duplicate_string(bDeviceId, bEndpointId->deviceId);
+		DEBUG_PC("[%d] -- Link: %s [%s ==> %s]", j-1, l->linkId, aDeviceId, bDeviceId);
 		// Check whether device B is in the targeted Vertices from A (i.e., v)?
 		// If not, add B in the targeted vertices B + create the edge and add it
 		// If B exist, check whether the explored link/edge is already in the list of edges
 		struct targetNodes_t* w = find_targeted_vertex_in_graph_context(v, bDeviceId);
 		if (w == NULL) {
-			//DEBUG_PC("B device [%s] is PEER of A device [%s]", bDeviceId, v->verticeId.nodeId);
+			DEBUG_PC("[%s] is PEER of [%s]", bDeviceId, v->verticeId.nodeId);
 			w = add_targeted_vertex_in_graph_context(v, bDeviceId);
-			add_edge_in_targetedVertice_set(w, l);
+			add_edge_in_targetedVertice_set(w, l, activeFlag);
 		}
 		else {
 			// w exists, it is needed to check whether the edge (link) should be added
 			struct edges_t* e = find_edge_in_targetedVertice_set(w, l);
 			if (e == NULL) {
 				// Add the link into the list
-				add_edge_in_targetedVertice_set(w, l);
+				add_edge_in_targetedVertice_set(w, l, activeFlag);
 			}
 			else {
 				DEBUG_PC("The link already exists ...");
@@ -2316,21 +2423,47 @@ void build_contextSet_linklList(struct contextSet_t* set) {
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void build_contextSet(struct contextSet_t* cSet) {
-	g_assert(cSet);
-	g_assert(deviceList);
-	g_assert(linkList);
+void build_contextSet(GList** cSet) {
+	gint activeFlag = 0; // 0: all devices/links are considered, regardless of their operational status
 
 	// devices are tied to contexts, i.e. depending on the contextId of the devices
-	build_contextSet_deviceList(cSet);
+	build_contextSet_deviceList(cSet, activeFlag);
+
+	DEBUG_PC("Length for the Context Set: %d", g_list_length(*cSet));
 
 	// Once the diverse contexts are created and the devices/endpoints asigned to the 
 	// respective graph tied to each context, it is needed to create the edges
-	build_contextSet_linklList(cSet);
+	build_contextSet_linklList(cSet, activeFlag);
 
 	return;
 }
 
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Create the set of (distinct) contexts with the deviceList and linkList with
+ * operational status active
+ *
+ * @param cSet
+ *
+ * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void build_contextSet_active(GList** cSet) {
+	gint activeFlag = 1; // 1: only devices/links with operational status ENABLED (set to 2) are considered
+
+	// devices are tied to contexts, i.e. depending on the contextId of the devices
+	build_contextSet_deviceList(cSet, activeFlag);
+
+	DEBUG_PC("Length for the Context Set: %d", g_list_length(*cSet));
+
+	// Once the diverse contexts are created and the devices/endpoints assigned to the 
+	// respective graph tied to each context, it is needed to create the edges
+	build_contextSet_linklList(cSet, activeFlag);
+	return;
+}
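+// Illustrative usage (a sketch; assumes deviceList and linkList have already been populated
+// by the caller):
+//   GList* cSet = NULL;
+//   build_contextSet_active(&cSet);   // graph restricted to ENABLED devices/links
+//   print_contextSet(cSet);
+//   g_list_free_full(g_steal_pointer(&cSet), (GDestroyNotify)destroy_context);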
+
 ////////////////////////////////////////////////////////////////////////////////////////
 /**
  * 	@file pathComp_tools.c
@@ -2342,11 +2475,14 @@ void build_contextSet(struct contextSet_t* cSet) {
  *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
-void print_contextSet(struct contextSet_t* set) {
-	g_assert(set);
+void print_contextSet(GList* set) {
+
+	DEBUG_PC("Printing the ContextSet w/ number of Elements: %d", g_list_length(set));
 
-	for (gint i = 0; i < set->num_context_set; i++) {
-		struct context_t* c = &(set->contextList[i]);
+	for (GList* ln = g_list_first(set);
+		ln;
+		ln = g_list_next(ln)) {
+		struct context_t* c = (struct context_t*)(ln->data);
 		DEBUG_PC("-------------------------------------------------------------");
 		DEBUG_PC(" Context Id: %s", c->contextId);
 		DEBUG_PC("-------------------------------------------------------------");
@@ -2422,8 +2558,7 @@ gint same_src_dst_pe_nodeid(struct service_t* s)
 /////////////////////////////////////////////////////////////////////////////////////////
 void comp_route_connection_issue_handler (struct compRouteOutput_t *path, struct service_t *s)
 {
-	g_assert(path);
-	g_assert(s);
+	g_assert(path); g_assert(s);
 
 	// Increase the number of computed routes/paths despite there was an issue to be reported		
 	path->numPaths++;	
@@ -2472,8 +2607,7 @@ void destroy_compRouteOutputList (struct compRouteOutputList_t *ro)
  */
 /////////////////////////////////////////////////////////////////////////////////////////
 void duplicate_graph (struct graph_t *originalGraph, struct graph_t *destGraph)	{
-	g_assert (originalGraph);
-	g_assert (destGraph);
+	g_assert (originalGraph); g_assert (destGraph);
 	
 	destGraph->numVertices = originalGraph->numVertices;
 	for (gint i = 0; i < originalGraph->numVertices; i++) {
@@ -2481,6 +2615,7 @@ void duplicate_graph (struct graph_t *originalGraph, struct graph_t *destGraph)
 		struct vertices_t *dVertex = &(destGraph->vertices[i]);
 		dVertex->numTargetedVertices = oVertex->numTargetedVertices;		
 		duplicate_node_id (&oVertex->verticeId, &dVertex->verticeId);
+		memcpy(&dVertex->power_idle, &oVertex->power_idle, sizeof(gdouble));
 		
 		for (gint j = 0; j < oVertex->numTargetedVertices; j++)	{
 			struct targetNodes_t *oTargetedVertex = &(oVertex->targetedVertices[j]);
@@ -2596,9 +2731,7 @@ struct edges_t* get_reverse_edge_from_the_graph(struct edges_t* e, struct graph_
 /////////////////////////////////////////////////////////////////////////////////////////
 void allocate_graph_resources (struct path_t *p, struct service_t *s, struct graph_t *g)
 {
-	g_assert (p);
-	g_assert (s);
-	g_assert (g);
+	g_assert (p); g_assert (s); g_assert (g);
 	// Retrieve the requested bw by the service
 	struct path_constraints_t* pathCons = get_path_constraints(s);
 
@@ -2617,8 +2750,7 @@ void allocate_graph_resources (struct path_t *p, struct service_t *s, struct gra
 		memcpy(&e->availCap, &resBw, sizeof(gdouble));
 		DEBUG_PC("Final e/link avail Bw: %f", e->availCap);	
 	}
-	g_free(pathCons);
-	
+	g_free(pathCons);	
 	return;
 }
 
@@ -2633,14 +2765,12 @@ void allocate_graph_resources (struct path_t *p, struct service_t *s, struct gra
  *	@parma g
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2021
+ *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void allocate_graph_reverse_resources(struct path_t* p, struct service_t * s, struct graph_t* g)
 {
-	g_assert(p);
-	g_assert(s);
-	g_assert(g);
+	g_assert(p); g_assert(s); g_assert(g);
 
 	struct path_constraints_t* pathCons = get_path_constraints(s);
 	for (gint i = 0; i < p->numPathLinks; i++) {
@@ -2674,20 +2804,20 @@ void allocate_graph_reverse_resources(struct path_t* p, struct service_t * s, st
  *	@param routeList
  *
  * 	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2021
+ *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void print_path_connection_list(struct compRouteOutputList_t* routeList) {
 	g_assert(routeList);
 	for (gint i = 0; i < routeList->numCompRouteConnList; i++) {
-		DEBUG_PC("==================== Service Item: %d ===================", i);
+		DEBUG_PC("==================== Service instance: %d ===================", i);
 		struct compRouteOutput_t* rO = &(routeList->compRouteConnection[i]);
 		DEBUG_PC("num service endpoints: %d", rO->num_service_endpoints_id);
 		struct serviceId_t* s = &(rO->serviceId);
 		DEBUG_PC("ContextId: %s, ServiceId: %s", s->contextId, s->service_uuid);
-		DEBUG_PC("ingress --- %s [%s]", rO->service_endpoints_id[0].device_uuid, 
+		DEBUG_PC("ingress - %s[%s]", rO->service_endpoints_id[0].device_uuid, 
 			rO->service_endpoints_id[0].endpoint_uuid);
-		DEBUG_PC("egress --- %s [%s]", rO->service_endpoints_id[1].device_uuid,
+		DEBUG_PC("egress - %s [%s]", rO->service_endpoints_id[1].device_uuid,
 			rO->service_endpoints_id[1].endpoint_uuid);
 
 		if (rO->noPathIssue == NO_PATH_CONS_ISSUE) {
@@ -2713,7 +2843,7 @@ void print_path_connection_list(struct compRouteOutputList_t* routeList) {
  *	@param d
  *
  *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
- *	@date 2021
+ *	@date 2022
  */
  /////////////////////////////////////////////////////////////////////////////////////////
 void update_stats_path_comp(struct compRouteOutputList_t* routeConnList, struct timeval d, gint numSuccesPathComp, gint numPathCompIntents) {
@@ -2729,8 +2859,11 @@ void update_stats_path_comp(struct compRouteOutputList_t* routeConnList, struct
 	DEBUG_PC("Succesfully Comp: %d | Path Comp Requests: %d", numSuccesPathComp, numPathCompIntents);
 	DEBUG_PC("AV. PATH COMP ALG. TIME: %f ms", av_alg_comp_time);
 
-	for (gint i = 0; i < serviceList->numServiceList; i++) {
-		struct service_t* s = &(serviceList->services[i]);
+	gint i = 0;
+	for (GList* listnode = g_list_first(serviceList);
+		listnode;
+		listnode = g_list_next(listnode), i++) {
+		struct service_t* s = (struct service_t*)(listnode->data);
 		char* eptr;
 		for (gint j = 0; j < s->num_service_constraints; j++) {
 			struct constraint_t* constraints = &(s->constraints[j]);
@@ -2739,6 +2872,7 @@ void update_stats_path_comp(struct compRouteOutputList_t* routeConnList, struct
 			}
 		}
 	}
+
 	for (gint k = 0; k < routeConnList->numCompRouteConnList; k++) {
 		struct compRouteOutput_t* rO = &(routeConnList->compRouteConnection[k]);
 		if (rO->noPathIssue == NO_PATH_CONS_ISSUE) {
@@ -2764,5 +2898,500 @@ void update_stats_path_comp(struct compRouteOutputList_t* routeConnList, struct
 	gdouble avBlockedBwRatio = (gdouble)(1.0 - avServedRatio);
 	DEBUG_PC("AV. BBE: %f", avBlockedBwRatio);
 	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate active service	path
+ *
+ *  @param actServPath
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_active_service_path(struct activeServPath_t* actServPath) {
+	g_assert(actServPath);
+	g_free(actServPath);
+	return;
+}
 
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate active service
+ *
+ *  @param actService
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_active_service(struct activeService_t* actService) {
+	g_assert(actService);
+	g_list_free_full(g_steal_pointer(&actService->activeServPath), (GDestroyNotify)destroy_active_service_path);
+	g_free(actService);
+	return;
 }
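+// A matching teardown for the global list would be (sketch; assumes activeServList holds
+// struct activeService_t* items):
+//   g_list_free_full(g_steal_pointer(&activeServList), (GDestroyNotify)destroy_active_service);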
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate a requested service 
+ *
+ *  @param s
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_requested_service(struct service_t* s) {
+	g_assert(s);
+	g_free(s);
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate a device
+ *
+ *  @param d
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_device(struct device_t* d) {
+	g_assert(d);
+	g_free(d);
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate a link	from the linkList
+ *
+ *  @param d
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_link(struct link_t* l) {
+	g_assert(l);
+	g_free(l);
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Eliminate a context from the contextSet
+ *
+ *  @param d
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void destroy_context(struct context_t* c) {
+	g_assert(c);
+	g_free(c);
+	return;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief Execution of the Dijkstra algorithm
+ *
+ *  @param srcMapIndex
+ *  @param dstMapIndex
+ *	@param g
+ *	@param s
+ *  @param mapNodes
+ *  @param SN
+ *  @param RP
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct service_t* s, 
+	struct map_nodes_t* mapNodes, struct nodes_t* SN, struct compRouteOutputItem_t* RP,
+	guint arg) {
+	g_assert(s); g_assert(g);
+
+	// Set params into mapNodes related to the source nodes of the request
+	mapNodes->map[srcMapIndex].distance = 0.0;
+	mapNodes->map[srcMapIndex].latency = 0.0;
+	mapNodes->map[srcMapIndex].avaiBandwidth = 0.0;
+	mapNodes->map[srcMapIndex].power = 0.0;
+
+	// Initialize the set Q and S
+	GList *S = NULL, *Q = NULL;
+	gint indexVertice = -1;
+
+	//  Add the source into the Q
+	struct nodeItem_t* nodeItem = g_malloc0(sizeof(struct nodeItem_t));
+	if (nodeItem == NULL) {
+		DEBUG_PC("memory allocation failed\n");
+		exit(-1);
+	}
+	// initialize some nodeItem attributes
+	nodeItem->distance = 0.0;
+	nodeItem->latency = 0.0;
+	nodeItem->power = 0.0;
+	duplicate_node_id(&mapNodes->map[srcMapIndex].verticeId, &nodeItem->node);
+
+	// Select the optimization process
+	if (arg & ENERGY_EFFICIENT_ARGUMENT)
+		Q = g_list_insert_sorted(Q, nodeItem, sort_by_energy);
+	// more "if" according to different optimization criteria ...
+	else
+		Q = g_list_insert_sorted(Q, nodeItem, sort_by_distance);
+
+	// Check whether there is spurNode (SN) and rootPath (RP)
+	if (SN != NULL && RP != NULL) {
+		struct routeElement_t* re = NULL;
+		for (gint j = 0; j < RP->numRouteElements; j++) {
+			// Get the source and target Nodes of the routeElement within the rootPath
+			re = &RP->routeElement[j];
+			DEBUG_PC("root Link: aNodeId: %s (%s) --> zNodeiId: %s (%s)", re->aNodeId.nodeId, re->aEndPointId, re->zNodeId.nodeId, re->zEndPointId);
+
+			// if ingress of the root link (aNodeId) is the spurNode, then stops
+			if (compare_node_id(&re->aNodeId, SN) == 0) {
+				DEBUG_PC("root Link: aNodeId: %s and spurNode: %s -- stop exploring the rootPath (RP)", re->aNodeId.nodeId, SN->nodeId);
+				break;
+			}
+			// Extract from Q
+			GList* listnode = g_list_first(Q);
+			struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
+			Q = g_list_remove(Q, node);
+
+			//DEBUG_RL_RA ("Exploring node %s", node->node.nodeId);
+			indexVertice = graph_vertice_lookup(node->node.nodeId, g);
+			g_assert(indexVertice >= 0);
+
+			// Get the indexTargetedVertice
+			gint indexTVertice = -1;
+			indexTVertice = graph_targeted_vertice_lookup(indexVertice, re->zNodeId.nodeId, g);
+			gint done = check_link(node, indexVertice, indexTVertice, g, s, &S, &Q, mapNodes, arg);
+			(void)done;
+			// Add to the S list
+			S = g_list_append(S, node);
+		}
+		// Check that the rootPath exploration stopped at the spurNode; otherwise something went wrong ...
+		if (re == NULL || compare_node_id(&re->aNodeId, SN) != 0) {
+			//DEBUG_PC ("root Link: aNodeId: %s is NOT the spurNode: %s -- something wrong", re->aNodeId.nodeId, SN->nodeId);
+			g_list_free_full(g_steal_pointer(&S), g_free);
+			g_list_free_full(g_steal_pointer(&Q), g_free);
+			return;
+		}
+	}
+
+	while (g_list_length(Q) > 0) {
+		//Extract from Q set
+		GList* listnode = g_list_first(Q);
+		struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
+		Q = g_list_remove(Q, node);
+		DEBUG_PC("Q length: %d", g_list_length(Q));
+		DEBUG_PC("DeviceId: %s", node->node.nodeId);
+
+		// visit all the links from u within the graph
+		indexVertice = graph_vertice_lookup(node->node.nodeId, g);
+		g_assert(indexVertice >= 0);
+
+		// Check the targeted vertices from u
+		for (gint i = 0; i < g->vertices[indexVertice].numTargetedVertices; i++) {
+			gint done = check_link(node, indexVertice, i, g, s, &S, &Q, mapNodes, arg);
+			(void)done;
+		}
+		// Add node into the S Set
+		S = g_list_append(S, node);
+		//DEBUG_PC ("S length: %d", g_list_length (S));              
+	}
+	g_list_free_full(g_steal_pointer(&S), g_free);
+	g_list_free_full(g_steal_pointer(&Q), g_free);
+	return;
+}
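+// Note: with SN (spurNode) and RP (rootPath) both NULL, dijkstra() reduces to a plain
+// constrained single-source SPF; ksp_comp()/alg_comp() re-invoke it per spur node with the
+// root path pinned, following Yen's algorithm.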
+
+///////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief KSP computation using Dijkstra algorithm
+ *
+ *  @param pred
+ *  @param g
+ *	@param s
+ *	@param SN
+ *	@param RP
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+gint ksp_comp(struct pred_t* pred, struct graph_t* g, struct service_t* s,
+	struct nodes_t* SN, struct compRouteOutputItem_t* RP, 
+	struct map_nodes_t* mapNodes, guint arg) {
+	g_assert(pred); g_assert(g); g_assert(s);
+
+	DEBUG_PC("Source: %s -- Destination: %s", s->service_endpoints_id[0].device_uuid, s->service_endpoints_id[1].device_uuid);
+
+	// Check the both ingress src and dst endpoints are in the graph
+	gint srcMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[0].device_uuid, mapNodes);
+	if (srcMapIndex == -1) {
+		DEBUG_PC("ingress DeviceId: %s NOT in the graph", s->service_endpoints_id[0].device_uuid);
+		return -1;
+	}
+	
+	gint dstMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
+	if (dstMapIndex == -1) {
+		DEBUG_PC("egress DeviceId: %s NOT in the graph", s->service_endpoints_id[1].device_uuid);
+		return -1;
+	}
+
+	//DEBUG_PC("srcMapIndex: %d (node: %s)", srcMapIndex, mapNodes->map[srcMapIndex].verticeId.nodeId);
+	//DEBUG_PC("dstMapIndex: %d (node: %s)", dstMapIndex, mapNodes->map[dstMapIndex].verticeId.nodeId);
+
+	// Compute the shortest path route
+	dijkstra(srcMapIndex, dstMapIndex, g, s, mapNodes, SN, RP, arg);
+
+	// Check that a feasible solution in term of latency and bandwidth is found
+	gint map_dstIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
+	struct map_t* dest_map = &mapNodes->map[map_dstIndex];
+	if (!(dest_map->distance < INFINITY_COST)) {
+		DEBUG_PC("destination: %s NOT reachable", s->service_endpoints_id[1].device_uuid);
+		return -1;
+	}
+
+	DEBUG_PC("AvailBw @ %s is %f", dest_map->verticeId.nodeId, dest_map->avaiBandwidth);
+	// Check that the computed available bandwidth is larger than 0.0
+	if (dest_map->avaiBandwidth <= (gfloat)0.0) {
+		DEBUG_PC("dst: %s NOT REACHABLE", s->service_endpoints_id[1].device_uuid);
+		return -1;
+	}
+	DEBUG_PC("dst: %s REACHABLE", s->service_endpoints_id[1].device_uuid);
+	// Handle predecessors
+	build_predecessors(pred, s, mapNodes);
+	return 1;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief set the path parameters (e.g., latency, cost, power, ...) to an under-constructed
+ * path from the computed map vertex
+ *
+ *  @param p
+ *  @param mapV
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void set_path_attributes(struct compRouteOutputItem_t* p, struct map_t* mapV) {
+	g_assert(p); g_assert(mapV);
+	memcpy(&p->cost, &mapV->distance, sizeof(gdouble));
+	memcpy(&p->availCap, &mapV->avaiBandwidth, sizeof(mapV->avaiBandwidth));
+	memcpy(&p->delay, &mapV->latency, sizeof(mapV->latency));
+	memcpy(&p->power, &mapV->power, sizeof(gdouble));
+	return;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * 	@file pathComp_tools.c
+ * 	@brief K-CSPF algorithm execution (YEN algorithm)
+ *
+ *  @param s
+ *  @param path
+ *  @param g
+ *  @param optimization_flag
+ *
+ *	@author Ricardo Martínez <ricardo.martinez@cttc.es>
+ *	@date 2022
+ */
+ /////////////////////////////////////////////////////////////////////////////////////////
+void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_t* g, guint arg) {
+	g_assert(s); g_assert(path); g_assert(g);
+
+	// create map of devices/nodes to handle the path computation using the context
+	struct map_nodes_t* mapNodes = create_map_node();
+	build_map_node(mapNodes, g);
+
+	// predecessors to store the computed path    
+	struct pred_t* predecessors = create_predecessors();
+	struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[0]);
+	struct service_endpoints_id_t* eEp = &(s->service_endpoints_id[1]);
+
+	// Compute the 1st KSP path
+	gint done = ksp_comp(predecessors, g, s, NULL, NULL, mapNodes, arg);
+	if (done == -1) {
+		DEBUG_PC("NO PATH for %s[%s] --> %s[%s]", iEp->device_uuid, iEp->endpoint_uuid, eEp->device_uuid, eEp->endpoint_uuid);
+		comp_route_connection_issue_handler(path, s);
+		g_free(mapNodes); g_free(predecessors);
+		return;
+	}
+
+	// Construct the path from the computed predecessors
+	struct compRouteOutputItem_t* p = create_path_item();
+	//print_predecessors(predecessors);
+	build_path(p, predecessors, s);
+	gint indexDest = get_map_index_by_nodeId(eEp->device_uuid, mapNodes);
+	struct map_t* dst_map = &mapNodes->map[indexDest];
+	// Get the delay and cost
+	set_path_attributes(p, dst_map);		
+
+	// Add the computed path, it may be a not feasible path, but at the end it is
+	// checked all the feasible paths, and select the first one
+	print_path(p);
+	
+	// Copy the serviceId
+	copy_service_id(&path->serviceId, &s->serviceId);
+	// copy the service endpoints, in general, there will be 2 (point-to-point network connectivity services)
+	for (gint i = 0; i < s->num_service_endpoints_id; i++) {
+		struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[i]);
+		struct service_endpoints_id_t* oEp = &(path->service_endpoints_id[i]);
+		copy_service_endpoint_id(oEp, iEp);
+	}
+	path->num_service_endpoints_id = s->num_service_endpoints_id;
+
+	DEBUG_PC("COMPUTE UP TO K Feasible Paths A[%d]", MAX_KSP_VALUE);	
+	// Create A and B sets of paths to handle the YEN algorithm
+	struct path_set_t *A = create_path_set(), *B = create_path_set();
+	// Add 1st Computed path into A->paths[0]	
+	duplicate_path(p, &A->paths[0]);
+	A->numPaths++;
+	g_free(predecessors); g_free(p);
+	for (gint k = 1; k < MAX_KSP_VALUE; k++) {
+		DEBUG_PC("*************************** kth (%d) ***********************************", k);
+		struct compRouteOutputItem_t* p = create_path_item();
+		duplicate_path(&A->paths[k - 1], p);
+		// The spurNode ranges from near-end node of the first link to the near-end of the last link forming the kth path
+		gint i = 0;
+		struct compRouteOutputItem_t* rootPath = create_path_item();
+		for (i = 0; i < p->numRouteElements; i++) {
+			struct nodes_t *spurNode = create_node(), *nextSpurNode = create_node();
+			struct routeElement_t* re = &(p->routeElement[i]);
+			// Create predecessors to store the computed path
+			struct pred_t* predecessors = create_predecessors();
+			// Clear previous mapNodes, i.e. create it again
+			g_free(mapNodes);
+			mapNodes = create_map_node();
+			build_map_node(mapNodes, g);
+			struct nodes_t* n = &re->aNodeId;
+			duplicate_node_id(n, spurNode);
+			n = &re->zNodeId;
+			duplicate_node_id(n, nextSpurNode);
+			DEBUG_PC("spurNode: %s --> nextSpurNode: %s", spurNode->nodeId, nextSpurNode->nodeId);
+
+			// rootPath contains a set of links of A[k-1] from the source Node till the SpurNode -> NextSpurNode
+			// Example: A[k-1] = {L1, L2, L3, L4}, i.e. " Node_a -- L1 --> Node_b -- L2 --> Node_c -- L3 --> Node_d -- L4 --> Node_e "
+			// E.g., for the ith iteration if the spurNode = Node_c and NextSpurNode = Node_d; then rootPath = {L1, L2, L3}			
+			add_routeElement_path_back(re, rootPath);
+			DEBUG_PC("\n");
+			DEBUG_PC("^^^^^^^rootPath^^^^^^^");
+			print_path(rootPath);
+
+			// For all existing and computed paths p in A check if from the source to the NextSpurNode
+			// the set of links matches with those contained in the rootPath
+			// If YES, remove from the auxiliary graph the next link in p from NextSpurNode
+			// Otherwise do nothing 
+			struct graph_t* gAux = create_graph();
+			duplicate_graph(g, gAux);
+			// Modified graph
+			modify_targeted_graph(gAux, A, rootPath, spurNode);
+
+			// Trigger the computation of the path from src to dst constrained to traverse all the links from src 
+			// to spurNode contained into rootPath over the resulting graph			
+			if (ksp_comp(predecessors, gAux, s, spurNode, rootPath, mapNodes, arg) == -1) {
+				DEBUG_PC("FAILED SP from %s via spurNode: %s to %s", iEp->device_uuid, spurNode->nodeId, eEp->device_uuid);
+				g_free(nextSpurNode); g_free(spurNode);
+				g_free(gAux); g_free(predecessors);
+				continue;
+			}
+			DEBUG_PC("SUCCESFUL SP from %s via spurNode: %s to %s", iEp->device_uuid, spurNode->nodeId, eEp->device_uuid);
+			// Create the node list from the predecessors
+			struct compRouteOutputItem_t* newKpath = create_path_item();
+			build_path(newKpath, predecessors, s);
+			DEBUG_PC("new K (for k: %d) Path is built", k);
+			gint indexDest = get_map_index_by_nodeId(eEp->device_uuid, mapNodes);
+			struct map_t* dst_map = &mapNodes->map[indexDest];
+			set_path_attributes(newKpath, dst_map);
+			DEBUG_PC("New PATH (@ kth: %d) ADDED to B[%d] - {Path Cost: %f, e2e latency: %f, bw: %f, Power: %f ", k, B->numPaths, newKpath->cost, 
+													newKpath->delay, newKpath->availCap, newKpath->power);
+			// Add the computed kth SP to the heap B
+			duplicate_path(newKpath, &B->paths[B->numPaths]);
+			B->numPaths++;
+			DEBUG_PC("Number of B paths: %d", B->numPaths);
+
+			g_free(newKpath); g_free(nextSpurNode); g_free(spurNode);
+			g_free(gAux); g_free(predecessors);
+		}
+		// If B is empty then stops
+		if (B->numPaths == 0) {
+			DEBUG_PC("B does not have any path ... the stops kth computation");
+			break;
+		}
+
+		// Sort the potential B paths according to different optimization parameters
+		sort_path_set(B, arg);
+		// Add the lowest path into A[k]		
+		DEBUG_PC("-------------------------------------------------------------");
+		DEBUG_PC("Append SP for B[0] to A[%d] --- Cost: %f, Latency: %f, Power: %f", A->numPaths, B->paths[0].cost, 
+																				B->paths[0].delay, B->paths[0].power);
+		duplicate_path(&B->paths[0], &A->paths[A->numPaths]);
+		A->numPaths++;
+		DEBUG_PC("A Set size: %d", A->numPaths);
+		DEBUG_PC("-------------------------------------------------------------");
+
+		// Remove/Pop front element from the path set B (i.e. remove B[0])
+		pop_front_path_set(B);
+		DEBUG_PC("B Set Size: %d", B->numPaths);
+	}
+
+	// Copy the serviceId
+	copy_service_id(&path->serviceId, &s->serviceId);
+	// Copy the service endpoints; in general there will be 2 (point-to-point network connectivity services)
+	for (gint m = 0; m < s->num_service_endpoints_id; m++) {
+		struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[m]);
+		struct service_endpoints_id_t* oEp = &(path->service_endpoints_id[m]); // destination is the computed output, not the service itself
+		copy_service_endpoint_id(oEp, iEp);
+	}
+	path->num_service_endpoints_id = s->num_service_endpoints_id;
+
+	// Print all the paths in A
+	for (gint h = 0; h < A->numPaths; h++) {
+		DEBUG_PC("================== A[%d] =======================", h);
+		print_path(&A->paths[h]);
+	}
+	DEBUG_PC("Number of paths: %d", path->numPaths);
+	// For all the computed paths in A, pick the one being feasible wrt the service constraints
+	for (gint ksp = 0; ksp < A->numPaths; ksp++) {
+		if (ksp >= MAX_KSP_VALUE) {
+			DEBUG_PC("Number of requested paths (%d) REACHED - STOP", ksp);
+			break;
+		}
+		gboolean feasibleRoute = check_computed_path_feasability(s, &A->paths[ksp]);
+		if (feasibleRoute == TRUE) {
+			DEBUG_PC("A[%d] available: %f, pathCost: %f; latency: %f, Power: %f", ksp, A->paths[ksp].availCap, A->paths[ksp].cost, A->paths[ksp].delay, A->paths[ksp].power);
+			struct compRouteOutputItem_t* pathaux = &A->paths[ksp];
+			path->numPaths++;
+			struct path_t* targetedPath = &path->paths[path->numPaths - 1];
+			duplicate_path_t(pathaux, targetedPath);
+			print_path_t(targetedPath);
+			remove_path_set(A);
+			remove_path_set(B);
+			return;
+		}
+	}
+	remove_path_set(A);
+	remove_path_set(B);
+	// No paths found --> Issue	
+	DEBUG_PC("K-SP failed!!!");
+	comp_route_connection_issue_handler(path, s);
+	return;
+}
\ No newline at end of file
diff --git a/src/pathcomp/backend/pathComp_tools.h b/src/pathcomp/backend/pathComp_tools.h
index b770788910a04f76a8625a7e2d74fca5f2f6ecad..cac66f81c561502a6d93249f5e44a6195cb0f61b 100644
--- a/src/pathcomp/backend/pathComp_tools.h
+++ b/src/pathcomp/backend/pathComp_tools.h
@@ -23,12 +23,17 @@
 #include <uuid/uuid.h>
 
 // External variables
-extern struct map_nodes_t* mapNodes;
-extern struct graph_t* graph;
-extern struct contextSet_t* contextSet;
-extern struct linkList_t* linkList;
-extern struct deviceList_t* deviceList;
-extern struct serviceList_t* serviceList;
+extern GList* contextSet;
+extern GList* linkList;
+extern GList* deviceList;
+extern GList* serviceList;
+extern GList* activeServList;
+
+//////////////////////////////////////////////////////////
+// Optimization computation argument 
+//////////////////////////////////////////////////////////
+#define NO_OPTIMIZATION_ARGUMENT		0x00000000
+#define ENERGY_EFFICIENT_ARGUMENT		0x00000001
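+// These flags are passed as the 'arg' parameter of, e.g., check_link(), sort_path_set() and
+// dijkstra(); ENERGY_EFFICIENT_ARGUMENT selects power-aware ordering (see sort_by_energy())
+// instead of the default cost/latency-based one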
 
 #define INFINITY_COST                   0xFFFFFFFF
 #define MAX_NUM_PRED					100
@@ -54,8 +59,9 @@ struct nodes_t {
 
 struct nodeItem_t {
     struct nodes_t node;
-    gdouble distance;
-	gdouble latency;
+    gdouble distance; // traversed distance
+	gdouble latency; // incurred latency
+	gdouble power; // consumed power
 };
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -80,7 +86,8 @@ struct edges_t {
 	gdouble totalCap, availCap;
 	
 	gdouble cost;	
-	gdouble delay;	
+	gdouble delay;
+	gdouble energy;
 
 	// inter-domain local and remote Ids
 	gchar interDomain_localId[MAX_INTER_DOMAIN_PLUG_IN_SIZE];
@@ -107,7 +114,8 @@ struct map_t {
 	struct edges_t predecessor;
 	gdouble distance;
 	gdouble avaiBandwidth;
-	gdouble latency;	
+	gdouble latency;
+	gdouble power;
 };
 
 #define MAX_MAP_NODE_SIZE				100
@@ -131,6 +139,7 @@ struct vertices_t {
 	struct targetNodes_t targetedVertices[MAX_NUM_VERTICES];
 	gint numTargetedVertices;
     struct nodes_t verticeId;
+	gdouble power_idle; // power idle of the device (due to the fans, etc.)
 };
 
 struct graph_t {
@@ -147,15 +156,6 @@ struct context_t {
 	struct graph_t g;
 };
 
-////////////////////////////////////////////////////
-// Structure for the Set of Contexts
-///////////////////////////////////////////////////
-#define MAX_NUMBER_CONTEXT		1 // 100 # LGR: reduced from 100 to 1 to divide by 100 the memory used
-struct contextSet_t {
-	struct context_t contextList[MAX_NUMBER_CONTEXT];
-	gint num_context_set;
-};
-
 #define MAX_ALG_ID_LENGTH		10
 ////////////////////////////////////////////////////
 // External Variables
@@ -241,6 +241,8 @@ struct endPoint_t {
 	struct capacity_t available_capacity;
 	// inter-domain identifiers
 	struct inter_domain_plug_in_t inter_domain_plug_in;
+	gfloat energyConsumption; // in nJ/bit
+	gint operational_status; // 0 Undefined, 1 Disabled, 2 Enabled
 };
 
 ///////////////////////////////////////////////////////////////////
@@ -249,25 +251,15 @@ struct endPoint_t {
 #define MAX_DEV_TYPE_SIZE				128
 #define MAX_DEV_ENDPOINT_LENGTH			50	// 10 # LGR: controllers might have large number of endpoints
 struct device_t {
+	gdouble power_idle; // power idle (baseline) of the switch in Watts
+	gint operational_status; // 0 - Undefined, 1 - Disabled, 2 - Enabled
 	gchar deviceId[UUID_CHAR_LENGTH]; // device ID using UUID (128 bits)
-
 	gchar deviceType[MAX_DEV_TYPE_SIZE]; // Specifies the device type
-
 	// define the endpoints attached to the device
 	gint numEndPoints;
 	struct endPoint_t endPoints[MAX_DEV_ENDPOINT_LENGTH];
 };
 
-///////////////////////////////////////////////////////////////////
-// Structure for the device List
-///////////////////////////////////////////////////////////////////
-#define MAX_NUM_DEVICE		200
-struct deviceList_t {
-	// device information
-	gint numDevices;
-	struct device_t devices[MAX_NUM_DEVICE];
-};
-
 ///////////////////////////////////////////////////////////////////
 // Structure for the link EndPoint Id
 ///////////////////////////////////////////////////////////////////
@@ -294,6 +286,13 @@ struct latency_characteristics_t {
 	gdouble fixed_latency;
 };
 
+///////////////////////////////////////////////////////////////////
+// Structure for the power characteristics of the link
+///////////////////////////////////////////////////////////////////
+struct power_characteristics_t {
+	gdouble power;
+};
+
 ///////////////////////////////////////////////////////////////////
 // Structure for the link 
 ///////////////////////////////////////////////////////////////////
@@ -304,10 +303,10 @@ struct latency_characteristics_t {
 #define LINK_FORWARDING_DIRECTION_UNKNOWN						2
 struct link_t {
 	gchar linkId[UUID_CHAR_LENGTH]; // link Id using UUID (128 bits)
-
+	//gdouble energy_link; // in nJ/bit
+	//gint operational_status; // 0 Undefined, 1 Disabled, 2 Enabled
 	gint numLinkEndPointIds;
 	struct link_endpointId_t linkEndPointId[MAX_NUM_LINK_ENDPOINT_IDS];
-
 	guint forwarding_direction;
 	struct capacity_t potential_capacity;
 	struct capacity_t available_capacity;
@@ -315,15 +314,6 @@ struct link_t {
 	struct latency_characteristics_t latency_characteristics;
 };
 
-///////////////////////////////////////////////////////////////////
-// Structure for the link List
-///////////////////////////////////////////////////////////////////
-#define MAX_NUM_LIST							2000
-struct linkList_t {
-	gint numLinks;
-	struct link_t links[MAX_NUM_LIST];
-};
-
 ////////////////////////////////////////////////////
 // Structure for service Identifier
 ///////////////////////////////////////////////////
@@ -365,12 +355,10 @@ struct constraint_t {
 struct service_t {
 	// Indentifier used to determine the used Algorithm Id, e.g., KSP
 	gchar algId[MAX_ALG_ID_LENGTH];
-
 	// PATHS expected for the output
 	guint kPaths;
 	
 	struct serviceId_t serviceId;
-
 	guint service_type;	 // unknown, l2nm, l3nm, tapi
 
 	// endpoints of the network connectivity service, assumed p2p
@@ -403,10 +391,27 @@ struct path_constraints_t {
 ////////////////////////////////////////////////////
 // Structure for the handling the service requests
 ///////////////////////////////////////////////////
-#define MAX_SERVICE_LIST						100
-struct serviceList_t {
-	struct service_t services[MAX_SERVICE_LIST];
-	gint numServiceList;	
+//#define MAX_SERVICE_LIST						100
+//struct serviceList_t {
+//	struct service_t services[MAX_SERVICE_LIST];
+//	gint numServiceList;	
+//};
+
+////////////////////////////////////////////////////
+// Structure for handling the active services
+///////////////////////////////////////////////////
+struct activeServPath_t {
+	struct topology_id_t topology_id;
+	gchar deviceId[UUID_CHAR_LENGTH];
+	gchar endPointId[UUID_CHAR_LENGTH];
+};
+
+struct activeService_t {
+	struct serviceId_t serviceId;
+	guint service_type;	 // unknown, l2nm, l3nm, tapi
+	struct service_endpoints_id_t service_endpoints_id[MAX_NUM_SERVICE_ENPOINTS_ID];
+	guint num_service_endpoints_id;
+	GList* activeServPath;
 };
 
 ////////////////////////////////////////////////////////////////////////////////////////////
@@ -425,7 +430,6 @@ struct pathLink_t {
 	gchar zEndPointId[UUID_CHAR_LENGTH];
 
 	struct topology_id_t topologyId;
-
 	struct linkTopology_t linkTopologies[2]; // a p2p link (at most) can connect to devices (endpoints) attached to 2 different topologies
 	gint numLinkTopologies;
 };
@@ -454,13 +458,13 @@ struct routeElement_t {
 	gchar contextId[UUID_CHAR_LENGTH];
 };
 
-struct compRouteOutputItem_t {
-	// Potential(total) and available capacity
+struct compRouteOutputItem_t {	
 	gint unit;
 	gdouble totalCap, availCap;
 
 	gdouble cost;
 	gdouble delay;
+	gdouble power;
 
 	struct routeElement_t routeElement[MAX_ROUTE_ELEMENTS];
 	gint numRouteElements;
@@ -477,6 +481,7 @@ struct path_t {
 	struct capacity_t path_capacity;
 	struct latency_characteristics_t path_latency;
 	struct cost_characteristics_t path_cost;
+	struct power_characteristics_t path_power;
 
 	struct pathLink_t pathLinks[MAX_NUM_PATH_LINKS];
 	guint numPathLinks;
@@ -484,18 +489,14 @@ struct path_t {
 
 #define NO_PATH_CONS_ISSUE								1	 // No path due to a constraint issue
 #define MAX_NUM_COMPUTED_PATHS							10
-struct compRouteOutput_t
-{
+struct compRouteOutput_t {
 	// object describing the service identifier: serviceId and contextId
 	struct serviceId_t serviceId;
-
 	// array describing the service endpoints ids
 	struct service_endpoints_id_t service_endpoints_id[MAX_NUM_SERVICE_ENPOINTS_ID];
 	guint num_service_endpoints_id;
-
 	struct path_t paths[MAX_NUM_COMPUTED_PATHS];
-	gint numPaths;
-	
+	gint numPaths;	
 	// if the transport connectivity service cannot be computed, this value is set to 0 determining the constraints were not fulfilled
 	gint noPathIssue;
 };
@@ -504,8 +505,7 @@ struct compRouteOutput_t
 // Structure to handle the response list with all the computed network connectivity services
 ////////////////////////////////////////////////////////////////////////////////////////////
 #define MAX_COMP_CONN_LIST		100
-struct compRouteOutputList_t
-{
+struct compRouteOutputList_t {
 	struct compRouteOutput_t compRouteConnection[MAX_COMP_CONN_LIST];
 	gint numCompRouteConnList;
 
@@ -526,6 +526,7 @@ struct compRouteOutputList_t
 // Prototype of external declaration of functions
 void print_path (struct compRouteOutputItem_t *);
 void print_path_t(struct path_t*);
+struct path_t* create_path();
 
 void duplicate_string(gchar *, gchar *);
 
@@ -533,7 +534,7 @@ gchar* get_uuid_char(uuid_t);
 void copy_service_id(struct serviceId_t*, struct serviceId_t *);
 void copy_service_endpoint_id(struct service_endpoints_id_t *, struct service_endpoints_id_t *);
 
-struct graph_t* get_graph_by_contextId(struct contextSet_t *, gchar *);
+struct graph_t* get_graph_by_contextId(GList*, gchar *);
 
 struct pred_t * create_predecessors ();
 struct edges_t* create_edge();
@@ -561,31 +562,29 @@ gint graph_targeted_vertice_add (gint, gchar *, struct graph_t *);
 void remove_edge_from_graph (struct graph_t *, struct edges_t *);
 
 struct path_set_t * create_path_set ();
-void sort_path_set (struct path_set_t *);
+void sort_path_set (struct path_set_t *, guint);
 void pop_front_path_set (struct path_set_t *);
 void remove_path_set(struct path_set_t*);
 
 void build_map_node(struct map_nodes_t *, struct graph_t *);
 struct compRouteOutputList_t * create_route_list();
+void duplicate_route_list(struct compRouteOutputList_t *, struct compRouteOutputList_t *);
 struct compRouteOutputItem_t * create_path_item (); 
 void add_routeElement_path_back (struct routeElement_t *, struct compRouteOutputItem_t *);
 gboolean matching_path_rootPath (struct compRouteOutputItem_t *, struct compRouteOutputItem_t *, struct nodes_t *, struct edges_t *);
 void modify_targeted_graph (struct graph_t *, struct path_set_t *, struct compRouteOutputItem_t *, struct nodes_t *);
 gint find_nodeId (gconstpointer, gconstpointer);
-gint check_link (struct nodeItem_t *, gint, gint, struct graph_t *, struct service_t *, GList **, GList **, struct map_nodes_t *);
+gint check_link (struct nodeItem_t *, gint, gint, struct graph_t *, struct service_t *, GList **, GList **, struct map_nodes_t *, guint arg);
 gboolean check_computed_path_feasability (struct service_t *, struct compRouteOutputItem_t * );
 
 gint sort_by_distance (gconstpointer, gconstpointer);
+gint sort_by_energy(gconstpointer, gconstpointer);
 
 struct graph_t * create_graph ();
 struct map_nodes_t * create_map_node ();
 
 struct service_t * get_service_for_computed_path(gchar *);
 
-struct deviceList_t* create_device_list();
-struct linkList_t* create_link_list();
-struct serviceList_t* create_service_list();
-
 void print_service_type(guint);
 void print_link_port_direction(guint);
 void print_termination_direction(guint);
@@ -593,9 +592,9 @@ void print_termination_state(guint);
 void print_capacity_unit(guint);
 void print_link_forwarding_direction(guint);
 
-struct contextSet_t* create_contextSet();
-void build_contextSet(struct contextSet_t *);
-void print_contextSet(struct contextSet_t *);
+void build_contextSet(GList **);
+void build_contextSet_active(GList **);
+void print_contextSet(GList *);
 
 gint same_src_dst_pe_nodeid (struct service_t *);
 void comp_route_connection_issue_handler (struct compRouteOutput_t *, struct service_t *);
@@ -610,4 +609,12 @@ struct timeval tv_adjust(struct timeval);
 
 void print_path_connection_list(struct compRouteOutputList_t*);
 void update_stats_path_comp(struct compRouteOutputList_t*, struct timeval, gint, gint);
+void destroy_active_service(struct activeService_t*);
+void destroy_requested_service(struct service_t*);
+void destroy_device(struct device_t*);
+void destroy_link(struct link_t*);
+void destroy_context(struct context_t*);
+void dijkstra(gint, gint, struct graph_t*, struct service_t*, struct map_nodes_t*, struct nodes_t*, struct compRouteOutputItem_t*, guint);
+void set_path_attributes(struct compRouteOutputItem_t*, struct map_t*);
+void alg_comp(struct service_t*, struct compRouteOutput_t*, struct graph_t*, guint);
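+// The trailing guint of dijkstra() and alg_comp() carries the optimization argument
+// (NO_OPTIMIZATION_ARGUMENT or ENERGY_EFFICIENT_ARGUMENT); the destroy_* helpers each free a
+// single element and are meant to be applied over the corresponding GLists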
 #endif
\ No newline at end of file
diff --git a/src/pathcomp/frontend/Config.py b/src/pathcomp/frontend/Config.py
index f17a9f5377b5abcbd9001d1d3773e26998cb3211..714eb7278074ac860caa76dc3ed8b4a40ae9f192 100644
--- a/src/pathcomp/frontend/Config.py
+++ b/src/pathcomp/frontend/Config.py
@@ -26,8 +26,9 @@ PATHCOMP_BACKEND_BASEURL = str(os.environ.get('PATHCOMP_BACKEND_BASEURL', DEFAUL
 # - first check env vars PATHCOMP_BACKEND_HOST & PATHCOMP_BACKEND_PORT
 # - if not set, check env vars PATHCOMPSERVICE_SERVICE_HOST & PATHCOMPSERVICE_SERVICE_PORT_HTTP
 # - if not set, use DEFAULT_PATHCOMP_BACKEND_HOST & DEFAULT_PATHCOMP_BACKEND_PORT
+
 backend_host = DEFAULT_PATHCOMP_BACKEND_HOST
-backend_host = os.environ.get('PATHCOMPSERVICE_SERVICE_HOST', backend_host)
+#backend_host = os.environ.get('PATHCOMPSERVICE_SERVICE_HOST', backend_host)
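+# NOTE: with the lookup above disabled, the backend host falls back to DEFAULT_PATHCOMP_BACKEND_HOST
+# unless PATHCOMP_BACKEND_HOST is set explicitly.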
 PATHCOMP_BACKEND_HOST = str(os.environ.get('PATHCOMP_BACKEND_HOST', backend_host))
 
 backend_port = DEFAULT_PATHCOMP_BACKEND_PORT
diff --git a/src/pathcomp/frontend/Dockerfile b/src/pathcomp/frontend/Dockerfile
index 352de75f31366b65e62e2f6357d1bd5f28bd2b0f..9384b3e19edd5e82b0efcb9706c41105a31321e3 100644
--- a/src/pathcomp/frontend/Dockerfile
+++ b/src/pathcomp/frontend/Dockerfile
@@ -62,8 +62,14 @@ RUN python3 -m pip install -r requirements.txt
 
 # Add component files into working directory
 WORKDIR /var/teraflow
-COPY src/context/. context/
-COPY src/device/. device/
+COPY src/context/__init__.py context/__init__.py
+COPY src/context/client/. context/client/
+COPY src/device/__init__.py device/__init__.py
+COPY src/device/client/. device/client/
+COPY src/service/__init__.py service/__init__.py
+COPY src/service/client/. service/client/
+COPY src/slice/__init__.py slice/__init__.py
+COPY src/slice/client/. slice/client/
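+# NOTE: only the client sub-packages (gRPC stubs) of context/device/service/slice are copied,
+# instead of the full components, presumably to keep the image small.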
 COPY src/pathcomp/. pathcomp/
 
 # Start the service
diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
index 6fc33dbd45a92405fb2fa115e12cb460a9111d54..52f1cd3d584e14ca5dee1bc5e0511e014bdc8e73 100644
--- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
+++ b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
@@ -13,9 +13,9 @@
 # limitations under the License.
 
 import grpc, logging, threading
-from common.Constants import DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
-from common.proto.context_pb2 import ContextId, Empty
+from common.proto.context_pb2 import ContextId, Empty, TopologyId
 from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
 from common.proto.pathcomp_pb2_grpc import PathCompServiceServicer
 from common.tools.context_queries.Device import get_devices_in_topology
@@ -23,6 +23,7 @@ from common.tools.context_queries.Link import get_links_in_topology
 from common.tools.context_queries.InterDomain import is_inter_domain
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from pathcomp.frontend.service.algorithms.Factory import get_algorithm
 
@@ -30,7 +31,7 @@ LOGGER = logging.getLogger(__name__)
 
 METRICS_POOL = MetricsPool('PathComp', 'RPC')
 
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+#ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 class PathCompServiceServicerImpl(PathCompServiceServicer):
     def __init__(self) -> None:
@@ -44,18 +45,23 @@ class PathCompServiceServicerImpl(PathCompServiceServicer):
 
         context_client = ContextClient()
 
+        context_id = json_context_id(DEFAULT_CONTEXT_NAME)
         if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids):
-            devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
-            links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
+            #devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
+            #links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
+            topology_id = json_topology_id(INTERDOMAIN_TOPOLOGY_NAME, context_id)
         else:
             # TODO: improve filtering of devices and links
             # TODO: add contexts, topologies, and membership of devices/links in topologies
-            devices = context_client.ListDevices(Empty())
-            links = context_client.ListLinks(Empty())
+            #devices = context_client.ListDevices(Empty())
+            #links = context_client.ListLinks(Empty())
+            topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id)
+
+        topology_details = context_client.GetTopologyDetails(TopologyId(**topology_id))
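+        # A single GetTopologyDetails RPC returns both the devices and the links of the selected
+        # topology, replacing the previous per-entity ListDevices/ListLinks queries.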
 
         algorithm = get_algorithm(request)
-        algorithm.add_devices(devices)
-        algorithm.add_links(links)
+        algorithm.add_devices(topology_details.devices)
+        algorithm.add_links(topology_details.links)
         algorithm.add_service_requests(request)
 
         #LOGGER.debug('device_list = {:s}'  .format(str(algorithm.device_list  )))
diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
index a6d39ee36949e075323613fceb71da5c77354fe5..144246620e85dd1aaf507efe75e22b62ce942587 100644
--- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
@@ -14,12 +14,10 @@
 
 import operator
 from typing import Dict, List, Optional, Set, Tuple
-from common.proto.context_pb2 import Connection, Link, Service
-from common.proto.pathcomp_pb2 import Algorithm_KDisjointPath, Algorithm_KShortestPath, PathCompReply, PathCompRequest
+from common.proto.context_pb2 import Link
+from common.proto.pathcomp_pb2 import Algorithm_KDisjointPath, Algorithm_KShortestPath, PathCompRequest
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from pathcomp.frontend.service.algorithms.tools.ComputeSubServices import convert_explicit_path_hops_to_connections
-from pathcomp.frontend.service.algorithms.tools.EroPathToHops import eropath_to_hops
-from ._Algorithm import _Algorithm
+from ._Algorithm import _Algorithm, SRC_END
 from .KShortestPathAlgorithm import KShortestPathAlgorithm
 
 Service_Id          = Tuple[str, str]   # (context_uuid, service_uuid)
@@ -100,7 +98,7 @@ class KDisjointPathAlgorithm(_Algorithm):
     def get_link_from_endpoint(self, endpoint : Dict) -> Tuple[Dict, Link]:
         device_uuid = endpoint['device_id']
         endpoint_uuid = endpoint['endpoint_uuid']
-        item = self.endpoint_to_link_dict.get((device_uuid, endpoint_uuid))
+        item = self.endpoint_to_link_dict.get((device_uuid, endpoint_uuid, SRC_END))
         if item is None:
             MSG = 'Link for Endpoint({:s}, {:s}) not found'
             self.logger.warning(MSG.format(device_uuid, endpoint_uuid))
@@ -141,7 +139,7 @@ class KDisjointPathAlgorithm(_Algorithm):
 
         Path = List[Dict]
         Path_NoPath = Optional[Path] # None = no path, list = path
-        self.json_reply : Dict[Tuple[str, str], List[Path_NoPath]] = dict()
+        service_to_paths : Dict[Tuple[str, str], List[Path_NoPath]] = dict()
 
         for num_path in range(self.num_disjoint):
             algorithm.service_list = list()
@@ -189,66 +187,25 @@ class KDisjointPathAlgorithm(_Algorithm):
             for response in response_list:
                 service_id = response['serviceId']
                 service_key = (service_id['contextId'], service_id['service_uuid'])
-                json_reply_service = self.json_reply.setdefault(service_key, list())
+                json_reply_service = service_to_paths.setdefault(service_key, list())
 
                 no_path_issue = response.get('noPath', {}).get('issue')
-                if no_path_issue is not None:
-                    json_reply_service.append(None)
-                    continue
+                if no_path_issue is not None: continue
 
-                path_endpoints = response['path'][0]['devices']
+                path_endpoints = response['path'][0]
                 json_reply_service.append(path_endpoints)
-                algorithm.link_list = self.remove_traversed_links(algorithm.link_list, path_endpoints)
+                algorithm.link_list = self.remove_traversed_links(algorithm.link_list, path_endpoints['devices'])
+
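+        # Re-assemble the backend-style JSON reply ('response-list', one entry per service) so the
+        # reply construction inherited from _Algorithm can translate it; a service obtaining fewer
+        # than num_disjoint paths is flagged with a noPath issue.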
+        self.json_reply = dict()
+        response_list = self.json_reply.setdefault('response-list', [])
+        for service_key,paths in service_to_paths.items():
+            response = {'serviceId': {
+                'contextId': service_key[0],
+                'service_uuid': service_key[1],
+            }}
+            response['path'] = paths
+            if len(paths) < self.num_disjoint:
+                response['noPath'] = {'issue': 1}
+            response_list.append(response)
 
         self.logger.debug('self.json_reply = {:s}'.format(str(self.json_reply)))
-
-    def get_reply(self) -> PathCompReply:
-        reply = PathCompReply()
-        grpc_services : Dict[Tuple[str, str], Service] = {}
-        grpc_connections : Dict[Tuple[int, str], Connection] = {}
-        for service_key,paths in self.json_reply.items():
-            context_uuid, service_uuid = service_key
-
-            grpc_services[service_key] = self.add_service_to_reply(reply, context_uuid, service_uuid)
-
-            for num_path,service_path_ero in enumerate(paths):
-                self.logger.warning('num_path={:d}'.format(num_path))
-                self.logger.warning('service_path_ero={:s}'.format(str(service_path_ero)))
-                if service_path_ero is None: continue
-                path_hops = eropath_to_hops(service_path_ero, self.endpoint_to_link_dict)
-                self.logger.warning('path_hops={:s}'.format(str(path_hops)))
-                connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid)
-                self.logger.warning('connections={:s}'.format(str(connections)))
-
-                for connection in connections:
-                    connection_uuid,device_layer,path_hops,_ = connection
-
-                    service_key = (context_uuid, connection_uuid)
-                    grpc_service = grpc_services.get(service_key)
-                    if grpc_service is not None: continue
-                    grpc_service = self.add_service_to_reply(
-                        reply, context_uuid, connection_uuid, device_layer=device_layer, path_hops=path_hops)
-                    grpc_services[service_key] = grpc_service
-
-                for connection in connections:
-                    connection_uuid,device_layer,path_hops,dependencies = connection
-
-                    service_key = (context_uuid, connection_uuid)
-                    grpc_service = grpc_services.get(service_key)
-                    if grpc_service is None: raise Exception('Service({:s}) not found'.format(str(service_key)))
-
-                    connection_uuid = '{:s}:{:d}'.format(connection_uuid, num_path)
-                    grpc_connection = grpc_connections.get(connection_uuid)
-                    if grpc_connection is not None: continue
-                    grpc_connection = self.add_connection_to_reply(reply, connection_uuid, grpc_service, path_hops)
-                    grpc_connections[connection_uuid] = grpc_connection
-
-                    for sub_service_uuid in dependencies:
-                        sub_service_key = (context_uuid, sub_service_uuid)
-                        grpc_sub_service = grpc_services.get(sub_service_key)
-                        if grpc_sub_service is None:
-                            raise Exception('Service({:s}) not found'.format(str(sub_service_key)))
-                        grpc_sub_service_id = grpc_connection.sub_service_ids.add()
-                        grpc_sub_service_id.CopyFrom(grpc_sub_service.service_id)
-
-        return reply
diff --git a/src/pathcomp/frontend/tests/Objects_A_B_C.py b/src/pathcomp/frontend/tests/Objects_A_B_C.py
index f26d74ce4c665663735bae69dcfb5a4e14311bfa..5290123b62251a58d8e0a7f273ea23c38ee2cc8a 100644
--- a/src/pathcomp/frontend/tests/Objects_A_B_C.py
+++ b/src/pathcomp/frontend/tests/Objects_A_B_C.py
@@ -80,21 +80,36 @@ DEVICE_C3_ID, DEVICE_C3_ENDPOINTS, DEVICE_C3 = compose_device('C3', ['1', '2', '
 LINK_A2_C3_ID, LINK_A2_C3 = compose_link(DEVICE_A2_ENDPOINTS[2], DEVICE_C3_ENDPOINTS[2])
 LINK_C1_B2_ID, LINK_C1_B2 = compose_link(DEVICE_C1_ENDPOINTS[2], DEVICE_B2_ENDPOINTS[2])
 
+LINK_C3_A2_ID, LINK_C3_A2 = compose_link(DEVICE_C3_ENDPOINTS[2], DEVICE_A2_ENDPOINTS[2])
+LINK_B2_C1_ID, LINK_B2_C1 = compose_link(DEVICE_B2_ENDPOINTS[2], DEVICE_C1_ENDPOINTS[2])
+
 # ----- IntraDomain A Links --------------------------------------------------------------------------------------------
 LINK_A1_A2_ID, LINK_A1_A2 = compose_link(DEVICE_A1_ENDPOINTS[0], DEVICE_A2_ENDPOINTS[0])
 LINK_A1_A3_ID, LINK_A1_A3 = compose_link(DEVICE_A1_ENDPOINTS[1], DEVICE_A3_ENDPOINTS[0])
 LINK_A2_A3_ID, LINK_A2_A3 = compose_link(DEVICE_A2_ENDPOINTS[1], DEVICE_A3_ENDPOINTS[1])
 
+LINK_A2_A1_ID, LINK_A2_A1 = compose_link(DEVICE_A2_ENDPOINTS[0], DEVICE_A1_ENDPOINTS[0])
+LINK_A3_A1_ID, LINK_A3_A1 = compose_link(DEVICE_A3_ENDPOINTS[0], DEVICE_A1_ENDPOINTS[1])
+LINK_A3_A2_ID, LINK_A3_A2 = compose_link(DEVICE_A3_ENDPOINTS[1], DEVICE_A2_ENDPOINTS[1])
+
 # ----- IntraDomain B Links --------------------------------------------------------------------------------------------
 LINK_B1_B2_ID, LINK_B1_B2 = compose_link(DEVICE_B1_ENDPOINTS[0], DEVICE_B2_ENDPOINTS[0])
 LINK_B1_B3_ID, LINK_B1_B3 = compose_link(DEVICE_B1_ENDPOINTS[1], DEVICE_B3_ENDPOINTS[0])
 LINK_B2_B3_ID, LINK_B2_B3 = compose_link(DEVICE_B2_ENDPOINTS[1], DEVICE_B3_ENDPOINTS[1])
 
+LINK_B2_B1_ID, LINK_B2_B1 = compose_link(DEVICE_B2_ENDPOINTS[0], DEVICE_B1_ENDPOINTS[0])
+LINK_B3_B1_ID, LINK_B3_B1 = compose_link(DEVICE_B3_ENDPOINTS[0], DEVICE_B1_ENDPOINTS[1])
+LINK_B3_B2_ID, LINK_B3_B2 = compose_link(DEVICE_B3_ENDPOINTS[1], DEVICE_B2_ENDPOINTS[1])
+
 # ----- IntraDomain C Links --------------------------------------------------------------------------------------------
 LINK_C1_C2_ID, LINK_C1_C2 = compose_link(DEVICE_C1_ENDPOINTS[0], DEVICE_C2_ENDPOINTS[0])
 LINK_C1_C3_ID, LINK_C1_C3 = compose_link(DEVICE_C1_ENDPOINTS[1], DEVICE_C3_ENDPOINTS[0])
 LINK_C2_C3_ID, LINK_C2_C3 = compose_link(DEVICE_C2_ENDPOINTS[1], DEVICE_C3_ENDPOINTS[1])
 
+LINK_C2_C1_ID, LINK_C2_C1 = compose_link(DEVICE_C2_ENDPOINTS[0], DEVICE_C1_ENDPOINTS[0])
+LINK_C3_C1_ID, LINK_C3_C1 = compose_link(DEVICE_C3_ENDPOINTS[0], DEVICE_C1_ENDPOINTS[1])
+LINK_C3_C2_ID, LINK_C3_C2 = compose_link(DEVICE_C3_ENDPOINTS[1], DEVICE_C2_ENDPOINTS[1])
+
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_A1_B1 = compose_service(DEVICE_A1_ENDPOINTS[2], DEVICE_B1_ENDPOINTS[2], constraints=[
     json_constraint_sla_capacity(10.0),
@@ -108,31 +123,38 @@ DEVICES    = [  DEVICE_A1, DEVICE_A2, DEVICE_A3,
                 DEVICE_B1, DEVICE_B2, DEVICE_B3,
                 DEVICE_C1, DEVICE_C2, DEVICE_C3,    ]
 LINKS      = [  LINK_A2_C3, LINK_C1_B2,
+                LINK_C3_A2, LINK_B2_C1,
+
                 LINK_A1_A2, LINK_A1_A3, LINK_A2_A3,
+                LINK_A2_A1, LINK_A3_A1, LINK_A3_A2,
+
                 LINK_B1_B2, LINK_B1_B3, LINK_B2_B3,
-                LINK_C1_C2, LINK_C1_C3, LINK_C2_C3, ]
+                LINK_B2_B1, LINK_B3_B1, LINK_B3_B2,
+
+                LINK_C1_C2, LINK_C1_C3, LINK_C2_C3,
+                LINK_C2_C1, LINK_C3_C1, LINK_C3_C2, ]
 SERVICES   = [  SERVICE_A1_B1]
 
-OBJECTS_PER_TOPOLOGY = [
-    (TOPOLOGY_ADMIN_ID,
-        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,
-            DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,
-            DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
-        [   LINK_A2_C3_ID, LINK_C1_B2_ID,
-            LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,
-            LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,
-            LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
-    ),
-    (TOPOLOGY_A_ID,
-        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,       ],
-        [   LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,    ],
-    ),
-    (TOPOLOGY_B_ID,
-        [   DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,       ],
-        [   LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,    ],
-    ),
-    (TOPOLOGY_C_ID,
-        [   DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
-        [   LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
-    ),
-]
+#OBJECTS_PER_TOPOLOGY = [
+#    (TOPOLOGY_ADMIN_ID,
+#        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,
+#            DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,
+#            DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
+#        [   LINK_A2_C3_ID, LINK_C1_B2_ID,
+#            LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,
+#            LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,
+#            LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
+#    ),
+#    (TOPOLOGY_A_ID,
+#        [   DEVICE_A1_ID, DEVICE_A2_ID, DEVICE_A3_ID,       ],
+#        [   LINK_A1_A2_ID, LINK_A1_A3_ID, LINK_A2_A3_ID,    ],
+#    ),
+#    (TOPOLOGY_B_ID,
+#        [   DEVICE_B1_ID, DEVICE_B2_ID, DEVICE_B3_ID,       ],
+#        [   LINK_B1_B2_ID, LINK_B1_B3_ID, LINK_B2_B3_ID,    ],
+#    ),
+#    (TOPOLOGY_C_ID,
+#        [   DEVICE_C1_ID, DEVICE_C2_ID, DEVICE_C3_ID,       ],
+#        [   LINK_C1_C2_ID, LINK_C1_C3_ID, LINK_C2_C3_ID,    ],
+#    ),
+#]
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
index 9ee784e1f76026416bca9824aa8e54e2c4f874f2..053dfd4c45e3822914745905c71f9b64300e1a2f 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
@@ -118,6 +118,11 @@ LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1
 LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
 LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
 
+LINK_CS1GW1_DC1GW_ID, LINK_CS1GW1_DC1GW = compose_link(DEV_CS1GW1_EPS[0], DEV_DC1GW_EPS[0])
+LINK_CS1GW2_DC1GW_ID, LINK_CS1GW2_DC1GW = compose_link(DEV_CS1GW2_EPS[0], DEV_DC1GW_EPS[1])
+LINK_CS2GW1_DC2GW_ID, LINK_CS2GW1_DC2GW = compose_link(DEV_CS2GW1_EPS[0], DEV_DC2GW_EPS[0])
+LINK_CS2GW2_DC2GW_ID, LINK_CS2GW2_DC2GW = compose_link(DEV_CS2GW2_EPS[0], DEV_DC2GW_EPS[1])
+
 # InterDomain CSGW-TN
 LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0])
 LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0])
@@ -128,6 +133,15 @@ LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4
 LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1])
 LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1])
 
+LINK_TNR1_CS1GW1_ID, LINK_TNR1_CS1GW1 = compose_link(DEV_TNR1_EPS[0], DEV_CS1GW1_EPS[1])
+LINK_TNR2_CS1GW2_ID, LINK_TNR2_CS1GW2 = compose_link(DEV_TNR2_EPS[0], DEV_CS1GW2_EPS[1])
+LINK_TNR2_CS1GW1_ID, LINK_TNR2_CS1GW1 = compose_link(DEV_TNR2_EPS[1], DEV_CS1GW1_EPS[2])
+LINK_TNR1_CS1GW2_ID, LINK_TNR1_CS1GW2 = compose_link(DEV_TNR1_EPS[1], DEV_CS1GW2_EPS[2])
+LINK_TNR3_CS2GW1_ID, LINK_TNR3_CS2GW1 = compose_link(DEV_TNR3_EPS[0], DEV_CS2GW1_EPS[1])
+LINK_TNR4_CS2GW2_ID, LINK_TNR4_CS2GW2 = compose_link(DEV_TNR4_EPS[0], DEV_CS2GW2_EPS[1])
+LINK_TNR4_CS2GW1_ID, LINK_TNR4_CS2GW1 = compose_link(DEV_TNR4_EPS[1], DEV_CS2GW1_EPS[2])
+LINK_TNR3_CS2GW2_ID, LINK_TNR3_CS2GW2 = compose_link(DEV_TNR3_EPS[1], DEV_CS2GW2_EPS[2])
+
 # IntraDomain TN
 LINK_TNR1_TNR2_ID, LINK_TNR1_TNR2 = compose_link(DEV_TNR1_EPS[2], DEV_TNR2_EPS[3])
 LINK_TNR2_TNR3_ID, LINK_TNR2_TNR3 = compose_link(DEV_TNR2_EPS[2], DEV_TNR3_EPS[3])
@@ -136,6 +150,13 @@ LINK_TNR4_TNR1_ID, LINK_TNR4_TNR1 = compose_link(DEV_TNR4_EPS[2], DEV_TNR1_EPS[3
 LINK_TNR1_TNR3_ID, LINK_TNR1_TNR3 = compose_link(DEV_TNR1_EPS[4], DEV_TNR3_EPS[4])
 LINK_TNR2_TNR4_ID, LINK_TNR2_TNR4 = compose_link(DEV_TNR2_EPS[4], DEV_TNR4_EPS[4])
 
+LINK_TNR2_TNR1_ID, LINK_TNR2_TNR1 = compose_link(DEV_TNR2_EPS[3], DEV_TNR1_EPS[2])
+LINK_TNR3_TNR2_ID, LINK_TNR3_TNR2 = compose_link(DEV_TNR3_EPS[3], DEV_TNR2_EPS[2])
+LINK_TNR4_TNR3_ID, LINK_TNR4_TNR3 = compose_link(DEV_TNR4_EPS[3], DEV_TNR3_EPS[2])
+LINK_TNR1_TNR4_ID, LINK_TNR1_TNR4 = compose_link(DEV_TNR1_EPS[3], DEV_TNR4_EPS[2])
+LINK_TNR3_TNR1_ID, LINK_TNR3_TNR1 = compose_link(DEV_TNR3_EPS[4], DEV_TNR1_EPS[4])
+LINK_TNR4_TNR2_ID, LINK_TNR4_TNR2 = compose_link(DEV_TNR4_EPS[4], DEV_TNR2_EPS[4])
+
 
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
@@ -151,41 +172,44 @@ DEVICES    = [  DEV_DC1GW, DEV_DC2GW,
                 DEV_TNR1, DEV_TNR2, DEV_TNR3, DEV_TNR4,
             ]
 LINKS      = [  LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
+                LINK_CS1GW1_DC1GW, LINK_CS1GW2_DC1GW, LINK_CS2GW1_DC2GW, LINK_CS2GW2_DC2GW,
+
                 LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1,
                 LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3,
                 LINK_TNR1_TNR2, LINK_TNR2_TNR3, LINK_TNR3_TNR4, LINK_TNR4_TNR1, LINK_TNR1_TNR3, LINK_TNR2_TNR4,
+                LINK_TNR2_TNR1, LINK_TNR3_TNR2, LINK_TNR4_TNR3, LINK_TNR1_TNR4, LINK_TNR3_TNR1, LINK_TNR4_TNR2,
             ]
 SERVICES   = [  SERVICE_DC1GW_DC2GW   ]
 
-OBJECTS_PER_TOPOLOGY = [
-    (TOPO_ADMIN_ID,
-        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
-            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
-            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-        ],
-        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
-            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
-            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
-            LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
-            LINK_TNR2_TNR4_ID,
-        ],
-    ),
-    (TOPO_DC1_ID,
-        [DEV_DC1GW_ID],
-        []),
-    (TOPO_DC2_ID,
-        [DEV_DC2GW_ID],
-        []),
-    (TOPO_CS1_ID,
-        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
-        []),
-    (TOPO_CS2_ID,
-        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
-        []),
-    (TOPO_TN_ID,
-        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-        ],
-        [   LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
-            LINK_TNR2_TNR4_ID,
-        ]),
-]
+#OBJECTS_PER_TOPOLOGY = [
+#    (TOPO_ADMIN_ID,
+#        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
+#            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
+#            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#        ],
+#        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
+#            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
+#            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
+#            LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
+#            LINK_TNR2_TNR4_ID,
+#        ],
+#    ),
+#    (TOPO_DC1_ID,
+#        [DEV_DC1GW_ID],
+#        []),
+#    (TOPO_DC2_ID,
+#        [DEV_DC2GW_ID],
+#        []),
+#    (TOPO_CS1_ID,
+#        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
+#        []),
+#    (TOPO_CS2_ID,
+#        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
+#        []),
+#    (TOPO_TN_ID,
+#        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#        ],
+#        [   LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
+#            LINK_TNR2_TNR4_ID,
+#        ]),
+#]
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
index 71510d088746bd791e4671686dd5114874dd5a2a..2c8428568c001a53cbf2c08aa13b61ad14a1bd51 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
@@ -130,6 +130,11 @@ LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1
 LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
 LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
 
+LINK_CS1GW1_DC1GW_ID, LINK_CS1GW1_DC1GW = compose_link(DEV_CS1GW1_EPS[0], DEV_DC1GW_EPS[0])
+LINK_CS1GW2_DC1GW_ID, LINK_CS1GW2_DC1GW = compose_link(DEV_CS1GW2_EPS[0], DEV_DC1GW_EPS[1])
+LINK_CS2GW1_DC2GW_ID, LINK_CS2GW1_DC2GW = compose_link(DEV_CS2GW1_EPS[0], DEV_DC2GW_EPS[0])
+LINK_CS2GW2_DC2GW_ID, LINK_CS2GW2_DC2GW = compose_link(DEV_CS2GW2_EPS[0], DEV_DC2GW_EPS[1])
+
 # InterDomain CSGW-TN
 LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0])
 LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0])
@@ -140,12 +145,26 @@ LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4
 LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1])
 LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1])
 
+LINK_TNR1_CS1GW1_ID, LINK_TNR1_CS1GW1 = compose_link(DEV_TNR1_EPS[0], DEV_CS1GW1_EPS[1])
+LINK_TNR2_CS1GW2_ID, LINK_TNR2_CS1GW2 = compose_link(DEV_TNR2_EPS[0], DEV_CS1GW2_EPS[1])
+LINK_TNR2_CS1GW1_ID, LINK_TNR2_CS1GW1 = compose_link(DEV_TNR2_EPS[1], DEV_CS1GW1_EPS[2])
+LINK_TNR1_CS1GW2_ID, LINK_TNR1_CS1GW2 = compose_link(DEV_TNR1_EPS[1], DEV_CS1GW2_EPS[2])
+LINK_TNR3_CS2GW1_ID, LINK_TNR3_CS2GW1 = compose_link(DEV_TNR3_EPS[0], DEV_CS2GW1_EPS[1])
+LINK_TNR4_CS2GW2_ID, LINK_TNR4_CS2GW2 = compose_link(DEV_TNR4_EPS[0], DEV_CS2GW2_EPS[1])
+LINK_TNR4_CS2GW1_ID, LINK_TNR4_CS2GW1 = compose_link(DEV_TNR4_EPS[1], DEV_CS2GW1_EPS[2])
+LINK_TNR3_CS2GW2_ID, LINK_TNR3_CS2GW2 = compose_link(DEV_TNR3_EPS[1], DEV_CS2GW2_EPS[2])
+
 # IntraDomain TN
 LINK_TNR1_TOLS_ID, LINK_TNR1_TOLS = compose_link(DEV_TNR1_EPS[2], DEV_TOLS_EPS[0])
 LINK_TNR2_TOLS_ID, LINK_TNR2_TOLS = compose_link(DEV_TNR2_EPS[2], DEV_TOLS_EPS[1])
 LINK_TNR3_TOLS_ID, LINK_TNR3_TOLS = compose_link(DEV_TNR3_EPS[2], DEV_TOLS_EPS[2])
 LINK_TNR4_TOLS_ID, LINK_TNR4_TOLS = compose_link(DEV_TNR4_EPS[2], DEV_TOLS_EPS[3])
 
+LINK_TOLS_TNR1_ID, LINK_TOLS_TNR1 = compose_link(DEV_TOLS_EPS[0], DEV_TNR1_EPS[2])
+LINK_TOLS_TNR2_ID, LINK_TOLS_TNR2 = compose_link(DEV_TOLS_EPS[1], DEV_TNR2_EPS[2])
+LINK_TOLS_TNR3_ID, LINK_TOLS_TNR3 = compose_link(DEV_TOLS_EPS[2], DEV_TNR3_EPS[2])
+LINK_TOLS_TNR4_ID, LINK_TOLS_TNR4 = compose_link(DEV_TOLS_EPS[3], DEV_TNR4_EPS[2])
+
 
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
@@ -162,41 +181,47 @@ DEVICES    = [  DEV_DC1GW, DEV_DC2GW,
                 DEV_TOLS,
             ]
 LINKS      = [  LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
+                LINK_CS1GW1_DC1GW, LINK_CS1GW2_DC1GW, LINK_CS2GW1_DC2GW, LINK_CS2GW2_DC2GW,
+
                 LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1,
                 LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3,
+                LINK_TNR1_CS1GW1, LINK_TNR2_CS1GW2, LINK_TNR2_CS1GW1, LINK_TNR1_CS1GW2,
+                LINK_TNR3_CS2GW1, LINK_TNR4_CS2GW2, LINK_TNR4_CS2GW1, LINK_TNR3_CS2GW2,
+
                 LINK_TNR1_TOLS, LINK_TNR2_TOLS, LINK_TNR3_TOLS, LINK_TNR4_TOLS,
+                LINK_TOLS_TNR1, LINK_TOLS_TNR2, LINK_TOLS_TNR3, LINK_TOLS_TNR4,
             ]
 SERVICES   = [  SERVICE_DC1GW_DC2GW   ]
 
-OBJECTS_PER_TOPOLOGY = [
-    (TOPO_ADMIN_ID,
-        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
-            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
-            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-            DEV_TOLS_ID,
-        ],
-        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
-            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
-            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
-            LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
-        ],
-    ),
-    (TOPO_DC1_ID,
-        [DEV_DC1GW_ID],
-        []),
-    (TOPO_DC2_ID,
-        [DEV_DC2GW_ID],
-        []),
-    (TOPO_CS1_ID,
-        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
-        []),
-    (TOPO_CS2_ID,
-        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
-        []),
-    (TOPO_TN_ID,
-        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
-            DEV_TOLS_ID,
-        ],
-        [   LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
-        ]),
-]
+#OBJECTS_PER_TOPOLOGY = [
+#    (TOPO_ADMIN_ID,
+#        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
+#            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
+#            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#            DEV_TOLS_ID,
+#        ],
+#        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
+#            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
+#            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
+#            LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
+#        ],
+#    ),
+#    (TOPO_DC1_ID,
+#        [DEV_DC1GW_ID],
+#        []),
+#    (TOPO_DC2_ID,
+#        [DEV_DC2GW_ID],
+#        []),
+#    (TOPO_CS1_ID,
+#        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
+#        []),
+#    (TOPO_CS2_ID,
+#        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
+#        []),
+#    (TOPO_TN_ID,
+#        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+#            DEV_TOLS_ID,
+#        ],
+#        [   LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
+#        ]),
+#]
diff --git a/src/pathcomp/frontend/tests/test_unitary.py b/src/pathcomp/frontend/tests/test_unitary.py
index 8088259b80b8ade2669568b74f004dcfa631dd9c..f4e3cbf0f60285b960625a677854c4b7ab4decb9 100644
--- a/src/pathcomp/frontend/tests/test_unitary.py
+++ b/src/pathcomp/frontend/tests/test_unitary.py
@@ -13,12 +13,15 @@
 # limitations under the License.
 
 import copy, logging, os
-from common.proto.context_pb2 import Context, ContextId, DeviceId, Link, LinkId, Topology, Device, TopologyId
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId
 from common.proto.pathcomp_pb2 import PathCompRequest
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
 from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.Constraint import (
     json_constraint_custom, json_constraint_endpoint_location_region, json_constraint_endpoint_priority,
     json_constraint_sla_availability, json_constraint_sla_capacity, json_constraint_sla_latency)
+from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.EndPoint import json_endpoint_id
 from common.tools.object_factory.Service import json_service_l3nm_planned
@@ -26,9 +29,9 @@ from context.client.ContextClient import ContextClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient
 
 # Scenarios:
-#from .Objects_A_B_C import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
-#from .Objects_DC_CSGW_TN import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
-from .Objects_DC_CSGW_TN_OLS import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
+#from .Objects_A_B_C import CONTEXTS, DEVICES, LINKS, SERVICES, TOPOLOGIES
+#from .Objects_DC_CSGW_TN import CONTEXTS, DEVICES, LINKS, SERVICES, TOPOLOGIES
+from .Objects_DC_CSGW_TN_OLS import CONTEXTS, DEVICES, LINKS, SERVICES, TOPOLOGIES
 
 # configure backend environment variables before overwriting them with fixtures to use real backend pathcomp
 DEFAULT_PATHCOMP_BACKEND_SCHEME  = 'http'
@@ -58,31 +61,29 @@ from .PrepareTestScenario import ( # pylint: disable=unused-import
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-def test_prepare_environment(
-    context_client : ContextClient):    # pylint: disable=redefined-outer-name
-
-    for context  in CONTEXTS  : context_client.SetContext (Context (**context ))
-    for topology in TOPOLOGIES: context_client.SetTopology(Topology(**topology))
-    for device   in DEVICES   : context_client.SetDevice  (Device  (**device  ))
-    for link     in LINKS     : context_client.SetLink    (Link    (**link    ))
-
-    for topology_id, device_ids, link_ids in OBJECTS_PER_TOPOLOGY:
-        topology = Topology()
-        topology.CopyFrom(context_client.GetTopology(TopologyId(**topology_id)))
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+DESCRIPTORS = {
+    'dummy_mode': True,
+    'contexts'  : CONTEXTS,
+    'topologies': TOPOLOGIES,
+    'devices'   : DEVICES,
+    'links'     : LINKS,
+}
 
-        device_ids_in_topology = {device_id.device_uuid.uuid for device_id in topology.device_ids}
-        func_device_id_not_added = lambda device_id: device_id['device_uuid']['uuid'] not in device_ids_in_topology
-        func_device_id_json_to_grpc = lambda device_id: DeviceId(**device_id)
-        device_ids_to_add = list(map(func_device_id_json_to_grpc, filter(func_device_id_not_added, device_ids)))
-        topology.device_ids.extend(device_ids_to_add)
+def test_prepare_environment(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+) -> None:
+    validate_empty_scenario(context_client)
 
-        link_ids_in_topology = {link_id.link_uuid.uuid for link_id in topology.link_ids}
-        func_link_id_not_added = lambda link_id: link_id['link_uuid']['uuid'] not in link_ids_in_topology
-        func_link_id_json_to_grpc = lambda link_id: LinkId(**link_id)
-        link_ids_to_add = list(map(func_link_id_json_to_grpc, filter(func_link_id_not_added, link_ids)))
-        topology.link_ids.extend(link_ids_to_add)
+    descriptor_loader = DescriptorLoader(descriptors=DESCRIPTORS, context_client=context_client)
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+    descriptor_loader.validate()
 
-        context_client.SetTopology(topology)
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
 
 def test_request_service_shortestpath(
     pathcomp_client : PathCompClient):  # pylint: disable=redefined-outer-name
@@ -266,9 +267,15 @@ def test_request_service_kdisjointpath(
 
 
 def test_cleanup_environment(
-    context_client : ContextClient):    # pylint: disable=redefined-outer-name
-
-    for link     in LINKS     : context_client.RemoveLink    (LinkId    (**link    ['link_id'    ]))
-    for device   in DEVICES   : context_client.RemoveDevice  (DeviceId  (**device  ['device_id'  ]))
-    for topology in TOPOLOGIES: context_client.RemoveTopology(TopologyId(**topology['topology_id']))
-    for context  in CONTEXTS  : context_client.RemoveContext (ContextId (**context ['context_id' ]))
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+) -> None:
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    # Load descriptors and validate the base scenario
+    descriptor_loader = DescriptorLoader(descriptors=DESCRIPTORS, context_client=context_client)
+    descriptor_loader.validate()
+    descriptor_loader.unload()
+    validate_empty_scenario(context_client)
diff --git a/src/policy/pom.xml b/src/policy/pom.xml
index 6ea28421abedf6916e998b6cfdebe23c34908c4a..267006311f82c11bce4db29f2d114f30c1832f88 100644
--- a/src/policy/pom.xml
+++ b/src/policy/pom.xml
@@ -179,6 +179,11 @@
             <scope>test</scope>
         </dependency>
 
+        <dependency>
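+            <!-- Provides MicroProfile Metrics support for the @Counted/@Timed annotations used in
+                 PolicyGatewayImpl; with Quarkus these metrics are typically served under /q/metrics -->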
+            <groupId>io.quarkus</groupId>
+            <artifactId>quarkus-smallrye-metrics</artifactId>
+        </dependency>
+
     </dependencies>
 
     <build>
diff --git a/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java b/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java
index c10e5dc8b91ee9dcc2ae8aa74526faeb4e4bfcec..30e888d9fab1aae535dca345c7c56e28218bd2c2 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/PolicyGatewayImpl.java
@@ -20,6 +20,9 @@ import context.ContextOuterClass.ServiceId;
 import io.quarkus.grpc.GrpcService;
 import io.smallrye.mutiny.Uni;
 import javax.inject.Inject;
+import org.eclipse.microprofile.metrics.MetricUnits;
+import org.eclipse.microprofile.metrics.annotation.Counted;
+import org.eclipse.microprofile.metrics.annotation.Timed;
 import policy.Policy;
 import policy.Policy.PolicyRuleBasic;
 import policy.Policy.PolicyRuleDevice;
@@ -41,6 +44,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
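+    // Each RPC below is instrumented with SmallRye Metrics: an invocation counter plus a
+    // millisecond timer (histogram) per method, intended to be scraped by Prometheus.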
 
     @Override
+    @Counted(name = "policy_policyAddService_counter")
+    @Timed(name = "policy_policyAddService_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyAddService(PolicyRuleService request) {
         final var policyRuleService = serializer.deserialize(request);
 
@@ -51,6 +56,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyUpdateService_counter")
+    @Timed(name = "policy_policyUpdateService_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyUpdateService(PolicyRuleService request) {
         final var policyRuleService = serializer.deserialize(request);
 
@@ -61,6 +68,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyAddDevice_counter")
+    @Timed(name = "policy_policyAddDevice_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyAddDevice(PolicyRuleDevice request) {
         final var policyRuleDevice = serializer.deserialize(request);
 
@@ -71,6 +80,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyUpdateDevice_counter")
+    @Timed(name = "policy_policyUpdateDevice_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyUpdateDevice(PolicyRuleDevice request) {
         final var policyRuleDevice = serializer.deserialize(request);
 
@@ -81,6 +92,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_policyDelete_counter")
+    @Timed(name = "policy_policyDelete_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleState> policyDelete(PolicyRuleId request) {
         final var policyRuleId = serializer.deserialize(request);
 
@@ -88,6 +101,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_getPolicyService_counter")
+    @Timed(name = "policy_getPolicyService_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleService> getPolicyService(PolicyRuleId request) {
         final var policyRuleBasic = PolicyRuleBasic.newBuilder().setPolicyRuleId(request).build();
 
@@ -96,6 +111,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_getPolicyDevice_counter")
+    @Timed(name = "policy_getPolicyDevice_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleDevice> getPolicyDevice(PolicyRuleId request) {
         final var policyRuleBasic = PolicyRuleBasic.newBuilder().setPolicyRuleId(request).build();
 
@@ -104,6 +121,8 @@ public class PolicyGatewayImpl implements PolicyGateway {
     }
 
     @Override
+    @Counted(name = "policy_getPolicyByServiceId_counter")
+    @Timed(name = "policy_getPolicyByServiceId_histogram", unit = MetricUnits.MILLISECONDS)
     public Uni<PolicyRuleServiceList> getPolicyByServiceId(ServiceId request) {
         return Uni.createFrom().item(() -> Policy.PolicyRuleServiceList.newBuilder().build());
     }
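
The `@Counted`/`@Timed` pairs above expose, per gRPC method, a counter and a millisecond timer through the Quarkus MicroProfile Metrics endpoint, which the `prometheus.io/path: /q/metrics` annotation added further below points Prometheus at. A minimal sketch for spot-checking the new metrics after deployment; the `localhost:8080` target is an assumption (e.g. via `kubectl port-forward` to the policy pod), not part of this change:

```python
# Sketch: spot-check the new policy metrics on the Quarkus metrics endpoint.
# Assumes the policy container is reachable on localhost:8080; adjust as needed.
import urllib.request

METRICS_URL = 'http://localhost:8080/q/metrics'  # path matches prometheus.io/path below

with urllib.request.urlopen(METRICS_URL) as response:
    body = response.read().decode('utf-8')

# Print only the counters/histograms added by @Counted/@Timed in this diff.
for line in body.splitlines():
    if 'policy_policy' in line or 'policy_getPolicy' in line:
        print(line)
```
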
diff --git a/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java b/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
index daee299ddf64327c0d782e640cd1e924e139dccb..ad763e35dfeef71c2f9f73dbf51785a3e03c0e0d 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
@@ -23,5 +23,6 @@ public enum DeviceDriverEnum {
     P4,
     IETF_NETWORK_TOPOLOGY,
     ONF_TR_352,
-    XR
+    XR,
+    IETF_L2VPN
 }
diff --git a/src/policy/src/main/resources/application.yml b/src/policy/src/main/resources/application.yml
index e908f5e36265fb2c3050f1b7e4247847463fc385..38a222d7934751e9eac28854300d55bd631a669c 100644
--- a/src/policy/src/main/resources/application.yml
+++ b/src/policy/src/main/resources/application.yml
@@ -37,6 +37,7 @@ quarkus:
     group: tfs
     name: controller/policy
     registry: labs.etsi.org:5050
+    tag: 0.1.0
 
   kubernetes:
     name: policyservice
@@ -52,14 +53,18 @@ quarkus:
       period: 10s
     ports:
       http:
-        host-port: 8080
+        host-port: 9192
         container-port: 8080
-      grpc:
-        host-port: 6060
-        container-port: 6060
     env:
       vars:
         context-service-host: "contextservice"
         monitoring-service-host: "monitoringservice"
         service-service-host: "serviceservice"
+    resources:
+      requests:
+        cpu: 50m
+        memory: 512Mi
+      limits:
+        cpu: 500m
+        memory: 2048Mi
 
diff --git a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
index 64102646119585e1f837b12a9be022d95a29c54f..b0fb90864ce32bf6b793dded5d1f9de1dfba5097 100644
--- a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
+++ b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
@@ -3601,7 +3601,8 @@ class SerializerTest {
                         ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352),
                 Arguments.of(DeviceDriverEnum.XR, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR),
                 Arguments.of(
-                        DeviceDriverEnum.IETF_L2VPN, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN),
+                        DeviceDriverEnum.IETF_L2VPN,
+                        ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN),
                 Arguments.of(
                         DeviceDriverEnum.UNDEFINED, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_UNDEFINED));
     }
diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml
index 40516e5cc3fdd1fb993a1248ad36ea7551edfc40..f1079230f5e5efb75fb14d6cd6f3ad3fb5c9d2e3 100644
--- a/src/policy/target/kubernetes/kubernetes.yml
+++ b/src/policy/target/kubernetes/kubernetes.yml
@@ -4,21 +4,24 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 ---
 apiVersion: v1
 kind: Service
 metadata:
   annotations:
-    app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5
-    app.quarkus.io/build-timestamp: 2022-11-18 - 12:56:37 +0000
+    app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+    app.quarkus.io/build-timestamp: 2023-04-04 - 11:56:04 +0000
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /q/metrics
+    prometheus.io/port: "8080"
+    prometheus.io/scheme: http
   labels:
     app.kubernetes.io/name: policyservice
     app: policyservice
@@ -26,9 +29,9 @@ metadata:
 spec:
   ports:
     - name: http
-      port: 8080
+      port: 9192
       targetPort: 8080
-    - name: grpc
+    - name: grpc-server
       port: 6060
       targetPort: 6060
   selector:
@@ -39,8 +42,12 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   annotations:
-    app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5
-    app.quarkus.io/build-timestamp: 2022-11-22 - 14:10:01 +0000
+    app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+    app.quarkus.io/build-timestamp: 2023-04-04 - 11:56:04 +0000
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /q/metrics
+    prometheus.io/port: "8080"
+    prometheus.io/scheme: http
   labels:
     app: policyservice
     app.kubernetes.io/name: policyservice
@@ -53,8 +60,12 @@ spec:
   template:
     metadata:
       annotations:
-        app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5
-        app.quarkus.io/build-timestamp: 2022-11-22 - 14:10:01 +0000
+        app.quarkus.io/commit-id: 23832f2975e3c8967e9685f7e3a5f5458d04527a
+        app.quarkus.io/build-timestamp: 2023-04-04 - 11:56:04 +0000
+        prometheus.io/scrape: "true"
+        prometheus.io/path: /q/metrics
+        prometheus.io/port: "8080"
+        prometheus.io/scheme: http
       labels:
         app: policyservice
         app.kubernetes.io/name: policyservice
@@ -89,7 +100,7 @@ spec:
               name: http
               protocol: TCP
             - containerPort: 6060
-              name: grpc
+              name: grpc-server
               protocol: TCP
           readinessProbe:
             failureThreshold: 3
@@ -101,3 +112,10 @@ spec:
             periodSeconds: 10
             successThreshold: 1
             timeoutSeconds: 10
+          resources:
+            limits:
+              cpu: 500m
+              memory: 2048Mi
+            requests:
+              cpu: 50m
+              memory: 512Mi
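
The four `prometheus.io/*` annotations added to the Service, Deployment, and pod template follow the common annotation-based scrape convention. A sketch of the target URL an annotation-driven relabel config would derive from them; the relabel rules themselves and the pod IP are assumptions, not part of this manifest:

```python
# Sketch: derive the scrape URL implied by the prometheus.io/* annotations above.
from typing import Dict, Optional

ANNOTATIONS: Dict[str, str] = {
    'prometheus.io/scrape': 'true',
    'prometheus.io/path': '/q/metrics',
    'prometheus.io/port': '8080',
    'prometheus.io/scheme': 'http',
}

def scrape_url(annotations: Dict[str, str], target_ip: str) -> Optional[str]:
    """Return the scrape URL, or None when scraping is disabled."""
    if annotations.get('prometheus.io/scrape') != 'true':
        return None
    scheme = annotations.get('prometheus.io/scheme', 'http')
    port   = annotations.get('prometheus.io/port', '80')
    path   = annotations.get('prometheus.io/path', '/metrics')
    return '{:s}://{:s}:{:s}{:s}'.format(scheme, target_ip, port, path)

print(scrape_url(ANNOTATIONS, '10.0.0.42'))  # http://10.0.0.42:8080/q/metrics
```
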
diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
index 363983b8653e1cfa553279d2df74d6ac893a4fec..ac44574ad60242b0acf21ba824ea448d5ec30bf1 100644
--- a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
+++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
@@ -105,17 +105,17 @@ def teardown_config_rules(
 
     if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
     network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
-    #connection_point_id   = 'VC-1'
+    connection_point_id   = 'VC-1'
 
     json_config_rules = [
-        #json_config_rule_delete(
-        #    '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
-        #    {'name': network_instance_name, 'connection_point': connection_point_id}),
+        json_config_rule_delete(
+            '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
+            {'name': network_instance_name, 'connection_point': connection_point_id}),
 
-        #json_config_rule_delete(
-        #    '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
-        #    {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
-        #    'subinterface': sub_interface_index}),
+        json_config_rule_delete(
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
+            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name,
+            'subinterface': sub_interface_index}),
 
         json_config_rule_delete(
             '/network_instance[{:s}]'.format(network_instance_name),
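
The re-enabled deletes matter for ordering: the connection point and the circuit interface are removed before the parent network instance. A standalone sketch of the three resource keys the teardown rules target, with placeholder endpoint and circuit values (`json_config_rule_delete` itself stays in `common.tools.object_factory.ConfigRule`):

```python
# Placeholder values; the real ones come from the service endpoint settings.
endpoint_name = 'eth-1/0/1'
circuit_id    = '111'

if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
connection_point_id   = 'VC-1'

# Children first, parent last: connection point and interface are deleted
# before the network instance that contains them.
resource_keys = [
    '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
    '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
    '/network_instance[{:s}]'.format(network_instance_name),
]
for key in resource_keys:
    print(key)
```
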
diff --git a/src/service/service/service_handlers/p4/p4_service_handler.py b/src/service/service/service_handlers/p4/p4_service_handler.py
index 6f2cfb5a9bc4dac991eecd14ba7b6eb1218bdaa2..8d609c11c9c1c4f25c0d387290c11de36af69a9a 100644
--- a/src/service/service/service_handlers/p4/p4_service_handler.py
+++ b/src/service/service/service_handlers/p4/p4_service_handler.py
@@ -16,18 +16,35 @@
 P4 service handler for the TeraFlowSDN controller.
 """
 
-import anytree, json, logging
-from typing import Any, Dict, List, Optional, Tuple, Union
-from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service
-from common.tools.object_factory.ConfigRule import json_config_rule, json_config_rule_delete, json_config_rule_set
+import logging
+from typing import Any, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
+from common.proto.context_pb2 import ConfigRule, DeviceId, Service
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
 from common.tools.object_factory.Device import json_device_id
-from common.type_checkers.Checkers import chk_type, chk_length
+from common.type_checkers.Checkers import chk_type
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
-from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
 
 LOGGER = logging.getLogger(__name__)
 
+HISTOGRAM_BUCKETS = (
+    # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
+    0.0010, 0.0025, 0.0050, 0.0075,
+    0.0100, 0.0250, 0.0500, 0.0750,
+    0.1000, 0.2500, 0.5000, 0.7500,
+    1.0000, 2.5000, 5.0000, 7.5000,
+    10.0000, 25.0000, 50.0000, 75.0000,
+    100.0, INF
+)
+METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4'})
+METRICS_POOL.get_or_create('SetEndpoint',      MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+METRICS_POOL.get_or_create('DeleteEndpoint',   MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+METRICS_POOL.get_or_create('SetConstraint',    MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+METRICS_POOL.get_or_create('DeleteConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+METRICS_POOL.get_or_create('SetConfig',        MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+METRICS_POOL.get_or_create('DeleteConfig',     MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
+
 def create_rule_set(endpoint_a, endpoint_b):
     return json_config_rule_set(
         'table',
@@ -99,6 +116,7 @@ class P4ServiceHandler(_ServiceHandler):
         self.__service = service
         self.__task_executor = task_executor # pylint: disable=unused-private-member
 
+    @metered_subclass_method(METRICS_POOL)
     def SetEndpoint(
         self, endpoints : List[Tuple[str, str, Optional[str]]],
         connection_uuid : Optional[str] = None
@@ -169,6 +187,7 @@ class P4ServiceHandler(_ServiceHandler):
 
         return results
 
+    @metered_subclass_method(METRICS_POOL)
     def DeleteEndpoint(
         self, endpoints : List[Tuple[str, str, Optional[str]]],
         connection_uuid : Optional[str] = None
@@ -239,6 +258,7 @@ class P4ServiceHandler(_ServiceHandler):
 
         return results
 
+    @metered_subclass_method(METRICS_POOL)
     def SetConstraint(self, constraints: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Create/Update service constraints.
@@ -261,6 +281,7 @@ class P4ServiceHandler(_ServiceHandler):
         LOGGER.warning(msg.format(str(constraints)))
         return [True for _ in range(len(constraints))]
 
+    @metered_subclass_method(METRICS_POOL)
     def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Delete service constraints.
@@ -285,6 +306,7 @@ class P4ServiceHandler(_ServiceHandler):
         LOGGER.warning(msg.format(str(constraints)))
         return [True for _ in range(len(constraints))]
 
+    @metered_subclass_method(METRICS_POOL)
     def SetConfig(self, resources: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Create/Update configuration for a list of service resources.
@@ -308,6 +330,7 @@ class P4ServiceHandler(_ServiceHandler):
         LOGGER.warning(msg.format(str(resources)))
         return [True for _ in range(len(resources))]
 
+    @metered_subclass_method(METRICS_POOL)
     def DeleteConfig(self, resources: List[Tuple[str, Any]]) \
             -> List[Union[bool, Exception]]:
         """ Delete configuration for a list of service resources.
diff --git a/src/tests/ofc22/descriptors_emulated.json b/src/tests/ofc22/descriptors_emulated.json
index aa76edecd116ee7336fc1a2621d2bc3ae95080ce..b68b9636d58d9c80c4774e4ade557f83796ac5b5 100644
--- a/src/tests/ofc22/descriptors_emulated.json
+++ b/src/tests/ofc22/descriptors_emulated.json
@@ -97,6 +97,35 @@
                 {"device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}},
                 {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
             ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R1-EMU/13/0/0/aade6001-f00b-5e2f-a357-6a0a9d3de870"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}},
+                {"device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R2-EMU/13/0/0/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}},
+                {"device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R3-EMU/13/0/0/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}},
+                {"device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "O1-OLS==R4-EMU/13/0/0/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}},
+                {"device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}
+            ]
         }
     ]
 }
\ No newline at end of file
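
The four added entries are the reverse direction of the existing R*-EMU to O1-OLS links, so each fiber is represented once per direction. A sketch deriving a reverse entry from a forward one; only the forward link's endpoints are needed, and the sample values are copied from the R1-EMU entry above:

```python
import json

def reverse_link(forward: dict) -> dict:
    """Build the OLS->router entry mirroring a router->OLS link descriptor."""
    ep_router, ep_ols = forward['link_endpoint_ids']
    dev_r  = ep_router['device_id']['device_uuid']['uuid']  # e.g. 'R1-EMU'
    uuid_r = ep_router['endpoint_uuid']['uuid']             # e.g. '13/0/0'
    dev_o  = ep_ols['device_id']['device_uuid']['uuid']     # 'O1-OLS'
    uuid_o = ep_ols['endpoint_uuid']['uuid']
    name = '{:s}=={:s}/{:s}/{:s}'.format(dev_o, dev_r, uuid_r, uuid_o)
    return {
        'link_id': {'link_uuid': {'uuid': name}},
        'link_endpoint_ids': [
            {'device_id': {'device_uuid': {'uuid': dev_o}}, 'endpoint_uuid': {'uuid': uuid_o}},
            {'device_id': {'device_uuid': {'uuid': dev_r}}, 'endpoint_uuid': {'uuid': uuid_r}},
        ],
    }

# Only the endpoints matter; the forward entry's link_id is omitted here.
forward = {'link_endpoint_ids': [
    {'device_id': {'device_uuid': {'uuid': 'R1-EMU'}}, 'endpoint_uuid': {'uuid': '13/0/0'}},
    {'device_id': {'device_uuid': {'uuid': 'O1-OLS'}}, 'endpoint_uuid': {'uuid': 'aade6001-f00b-5e2f-a357-6a0a9d3de870'}},
]}
print(json.dumps(reverse_link(forward), indent=4))
```
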
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index 32cefddf3b2a8251623b60fd9fc039588cd6b9bb..75f036befd4bed3bb3bd743b9f423bf21c014e55 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -131,25 +131,18 @@ def topology():
         topology_uuid = session['topology_uuid']
 
         json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))
-        grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id))
+        response = context_client.GetTopologyDetails(TopologyId(**json_topo_id))
 
-        topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids}
-        topo_link_uuids   = {link_id  .link_uuid  .uuid for link_id   in grpc_topology.link_ids  }
-
-        response = context_client.ListDevices(Empty())
         devices = []
         for device in response.devices:
-            if device.device_id.device_uuid.uuid not in topo_device_uuids: continue
             devices.append({
                 'id': device.device_id.device_uuid.uuid,
                 'name': device.name,
                 'type': device.device_type,
             })
 
-        response = context_client.ListLinks(Empty())
         links = []
         for link in response.links:
-            if link.link_id.link_uuid.uuid not in topo_link_uuids: continue
             if len(link.link_endpoint_ids) != 2:
                 str_link = grpc_message_to_json_string(link)
                 LOGGER.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link))
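
The topology view now issues a single `GetTopologyDetails` RPC instead of `GetTopology` plus `ListDevices`/`ListLinks` with client-side membership filtering, since the details response already carries the devices and links scoped to the topology. A sketch of the consolidated pattern as a standalone helper; the attribute names (`devices`, `links`, `link_endpoint_ids`) follow the diff, while the import paths for the object-factory helpers are assumptions based on their usual location in the repo:

```python
# Sketch: consolidated topology fetch. One RPC replaces three plus filtering.
from common.proto.context_pb2 import TopologyId
from common.tools.object_factory.Context import json_context_id
from common.tools.object_factory.Topology import json_topology_id

def fetch_topology(context_client, context_uuid: str, topology_uuid: str):
    json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))
    response = context_client.GetTopologyDetails(TopologyId(**json_topo_id))
    devices = [
        {'id': d.device_id.device_uuid.uuid, 'name': d.name, 'type': d.device_type}
        for d in response.devices
    ]
    # Links with other than two endpoints are skipped, as in the route above.
    links = [link for link in response.links if len(link.link_endpoint_ids) == 2]
    return devices, links
```
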