diff --git a/deploy/expose_dashboard.sh b/deploy/expose_dashboard.sh
index 60b41c7b75d4f96a22151b1d4d68ba53c75a265c..65f715cabe6d688eb01dc5ae6afe7a21261aca94 100755
--- a/deploy/expose_dashboard.sh
+++ b/deploy/expose_dashboard.sh
@@ -29,11 +29,13 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
 # Automated steps start here
 ########################################################################################################################
 
+MONITORING_NAMESPACE="monitoring"
+
 function expose_dashboard() {
     echo "Prometheus Port Mapping"
     echo ">>> Expose Prometheus HTTP Mgmt GUI port (9090->${PROM_EXT_PORT_HTTP})"
-    PROM_PORT_HTTP=$(kubectl --namespace monitoring get service prometheus-k8s -o 'jsonpath={.spec.ports[?(@.name=="web")].port}')
-    PATCH='{"data": {"'${PROM_EXT_PORT_HTTP}'": "monitoring/prometheus-k8s:'${PROM_PORT_HTTP}'"}}'
+    PROM_PORT_HTTP=$(kubectl --namespace ${MONITORING_NAMESPACE} get service prometheus-k8s -o 'jsonpath={.spec.ports[?(@.name=="web")].port}')
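+    # The MicroK8s nginx-ingress TCP configmap maps "<external-port>" to "<namespace>/<service>:<port>"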
+    PATCH='{"data": {"'${PROM_EXT_PORT_HTTP}'": "'${MONITORING_NAMESPACE}'/prometheus-k8s:'${PROM_PORT_HTTP}'"}}'
     kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
 
     PORT_MAP='{"containerPort": '${PROM_EXT_PORT_HTTP}', "hostPort": '${PROM_EXT_PORT_HTTP}'}'
@@ -44,8 +46,8 @@ function expose_dashboard() {
 
     echo "Grafana Port Mapping"
     echo ">>> Expose Grafana HTTP Mgmt GUI port (3000->${GRAF_EXT_PORT_HTTP})"
-    GRAF_PORT_HTTP=$(kubectl --namespace monitoring get service grafana -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
-    PATCH='{"data": {"'${GRAF_EXT_PORT_HTTP}'": "monitoring/grafana:'${GRAF_PORT_HTTP}'"}}'
+    GRAF_PORT_HTTP=$(kubectl --namespace ${MONITORING_NAMESPACE} get service grafana -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+    PATCH='{"data": {"'${GRAF_EXT_PORT_HTTP}'": "'${MONITORING_NAMESPACE}'/grafana:'${GRAF_PORT_HTTP}'"}}'
     kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
 
     PORT_MAP='{"containerPort": '${GRAF_EXT_PORT_HTTP}', "hostPort": '${GRAF_EXT_PORT_HTTP}'}'
@@ -55,4 +57,9 @@ function expose_dashboard() {
     echo
 }
 
-expose_dashboard
+if kubectl get namespace ${MONITORING_NAMESPACE} &> /dev/null; then
+    echo ">>> Namespace ${MONITORING_NAMESPACE} is present, exposing dashboard..."
+    expose_dashboard
+else
+    echo ">>> Namespace ${MONITORING_NAMESPACE} is NOT present, skipping expose dashboard..."
+fi
diff --git a/deploy/mock_blockchain.sh b/deploy/mock_blockchain.sh
index ef7811c87eabdcb7cf95db2e4cf1a6eee52ef6ca..74d62cd526a38298c8f197fedf0c0169dbb8efca 100755
--- a/deploy/mock_blockchain.sh
+++ b/deploy/mock_blockchain.sh
@@ -38,20 +38,21 @@ GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
 TMP_FOLDER="./tmp"
 
 # Create a tmp folder for files modified during the deployment
-TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${K8S_NAMESPACE}/manifests"
 mkdir -p $TMP_MANIFESTS_FOLDER
-TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+TMP_LOGS_FOLDER="${TMP_FOLDER}/${K8S_NAMESPACE}/logs"
 mkdir -p $TMP_LOGS_FOLDER
 
 echo "Deleting and Creating a new namespace..."
-kubectl delete namespace $K8S_NAMESPACE
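+# --ignore-not-found avoids failing on a fresh cluster where the namespace does not exist yet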
+kubectl delete namespace $K8S_NAMESPACE --ignore-not-found
 kubectl create namespace $K8S_NAMESPACE
 printf "\n"
 
 echo "Deploying components and collecting environment variables..."
 ENV_VARS_SCRIPT=tfs_bchain_runtime_env_vars.sh
-echo "# Environment variables for TeraFlow Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
+echo "# Environment variables for TeraFlowSDN Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
 PYTHONPATH=$(pwd)/src
+echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
 
 echo "Processing '$COMPONENT' component..."
 IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
@@ -77,12 +78,12 @@ cp ./manifests/"${COMPONENT}".yaml "$MANIFEST"
 
 if [ -n "$REGISTRY_IMAGE" ]; then
     # Registry is set
-    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
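+    # The registry URL contains a port (":5050"), so the image tag is the 4th ":"-separated field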
+    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
     sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
     sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
 else
     # Registry is not set
-    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
+    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
     sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
     sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
 fi
@@ -91,8 +92,8 @@ echo "  Deploying '$COMPONENT' component to Kubernetes..."
 DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
 kubectl --namespace $K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
 COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
-kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
-kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
+#kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
+#kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
 
 echo "  Collecting env-vars for '$COMPONENT' component..."
 SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME} --namespace $K8S_NAMESPACE -o json)
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index e6a0c0c1053b69462a0e60c6b6cebe28a7dc59af..95d882c8ba06d53c724559800440a788b7496555 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -327,7 +327,12 @@ echo "Deploying extra manifests..."
 for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do
     echo "Processing manifest '$EXTRA_MANIFEST'..."
     if [[ "$EXTRA_MANIFEST" == *"servicemonitor"* ]]; then
-        kubectl apply -f $EXTRA_MANIFEST
+        if kubectl get namespace monitoring &> /dev/null; then
+            echo ">>> Namespace monitoring is present, applying service monitors..."
+            kubectl apply -f $EXTRA_MANIFEST
+        else
+            echo ">>> Namespace monitoring is NOT present, skipping service monitors..."
+        fi
     else
         kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST
     fi
@@ -343,7 +348,7 @@ for COMPONENT in $TFS_COMPONENTS; do
     printf "\n"
 done
 
-if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
+if [[ "$TFS_COMPONENTS" == *"monitoring"* ]] && [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
     echo "Configuring WebUI DataStores and Dashboards..."
     sleep 5
 
diff --git a/hackfest/containerlab/commands.txt b/hackfest/containerlab/commands.txt
index 18c629c0af2fe176a34f9b08a16405731c185243..df5fbc0ce0163f4ce06b862e90e29854dbae204a 100644
--- a/hackfest/containerlab/commands.txt
+++ b/hackfest/containerlab/commands.txt
@@ -60,22 +60,24 @@ docker exec -it clab-tfs-scenario-client2 bash
 $ sudo bash -c "$(curl -sL https://get-gnmic.kmrd.dev)"
 
 ## gNMI Capabilities request
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify capabilities
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify capabilities
 
 ## gNMI Get request
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/name/host-name
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /interface[name=mgmt0]
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/config/hostname
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /interfaces/interface[name=mgmt0]
+
 
 ## gNMI Set request
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --update-path /system/name/host-name --update-value slr11
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --update-path /system/config/hostname --update-value srl11
 
-(we check the changed value) 
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/name/host-name 
+(we check the changed value)
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/config/hostname
 
 ## Subscribe request
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf subscribe --path /interface[name=mgmt0]/statistics
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf subscribe --path /interfaces/interface[name=mgmt0]/state/
+
 (In another terminal, you can generate traffic) 
-$ssh admin@clab-srlinux-srl1
+$ ssh admin@clab-tfs-scenario-srl1
 
 
 
diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
index a99e3e5884745aa1971e5ad7f914901e38f75d47..77e421f297fefc8979b9cd6e3415c58be611a53a 100644
--- a/manifests/deviceservice.yaml
+++ b/manifests/deviceservice.yaml
@@ -44,7 +44,7 @@ spec:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:2020"]
           failureThreshold: 30
-          periodSeconds: 10
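+          # Probe every second; with failureThreshold 30 this allows ~30s for the service to report healthy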
+          periodSeconds: 1
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:2020"]
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index ba408bb40d4af74e6be53300395735786b9c843d..43caa9f04b56d6477d82c5a5bb22cb292eec8a90 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -74,26 +74,27 @@ spec:
         - name: GF_SERVER_SERVE_FROM_SUB_PATH
           value: "true"
         readinessProbe:
-          failureThreshold: 3
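+          # Check every second with a 60-failure budget so the WebUI turns ready as soon as /login responds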
+          failureThreshold: 60
           httpGet:
-            path: /robots.txt
+            #path: /robots.txt
+            path: /login
             port: 3000
             scheme: HTTP
-          initialDelaySeconds: 10
-          periodSeconds: 30
+          initialDelaySeconds: 1
+          periodSeconds: 1
           successThreshold: 1
           timeoutSeconds: 2
         livenessProbe:
-          failureThreshold: 3
-          initialDelaySeconds: 30
-          periodSeconds: 10
+          failureThreshold: 60
+          initialDelaySeconds: 1
+          periodSeconds: 1
           successThreshold: 1
           tcpSocket:
             port: 3000
           timeoutSeconds: 1
         resources:
           requests:
-            cpu: 150m
+            cpu: 250m
             memory: 512Mi
           limits:
             cpu: 500m
diff --git a/proto/context.proto b/proto/context.proto
index d1b7bada6e8dea369a55d9a7cb2fd47ede055e7c..22e11bc68b840115a19551958ac322acb71fb9a4 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -34,14 +34,6 @@ service ContextService {
   rpc RemoveTopology     (TopologyId    ) returns (       Empty           ) {}
   rpc GetTopologyEvents  (Empty         ) returns (stream TopologyEvent   ) {}
 
-  rpc ListInventoryIds   (Empty         ) returns (       InventoryIdList ) {}
-  rpc ListInventory      (Empty         ) returns (       InventoryList   ) {}
-  rpc ListInventoryNames (InventoryIdList) returns(     InventoryNameList ) {}
-  rpc GetInventory       (InventoryId   ) returns (       Inventory       ) {}
-  rpc SetInventory       (Inventory     ) returns (       InventoryId     ) {}
-  rpc RemoveInventory    (InventoryId   ) returns (       Empty           ) {}
-  rpc GetInventoryEvents (InventoryId   ) returns (stream InventoryEvent  ) {}
-
   rpc ListDeviceIds      (Empty         ) returns (       DeviceIdList    ) {}
   rpc ListDevices        (Empty         ) returns (       DeviceList      ) {}
   rpc GetDevice          (DeviceId      ) returns (       Device          ) {}
@@ -182,17 +174,17 @@ message Device {
   DeviceOperationalStatusEnum device_operational_status = 5;
   repeated DeviceDriverEnum device_drivers = 6;
   repeated EndPoint device_endpoints = 7;
-  // Used for inventory:
-  map<string, Component> components = 8; // dict[comp.name => Component]
-    DeviceId controller_id = 9; // Identifier of node controlling the actual device
+  repeated Component components = 8; // Used for inventory
+  DeviceId controller_id = 9; // Identifier of node controlling the actual device
 }
 
-message Component {
-  Uuid uuid = 1;
-  string name = 2;
-  string type = 3;
-  repeated string child = 4; // list[sub-component.name]
-  map<string, string> attributes = 5; // dict[attr.name => json.dumps(attr.value)]
+message Component {  // Defined prior to this section - Tested OK
+  Uuid component_uuid = 1;
+  string name         = 2;
+  string type         = 3;
+
+  map<string, string> attributes = 4; // dict[attr.name => json.dumps(attr.value)]
+  string parent       = 5;
 }
 
 message DeviceConfig {
@@ -205,7 +197,7 @@ enum DeviceDriverEnum {
   DEVICEDRIVER_TRANSPORT_API = 2;
   DEVICEDRIVER_P4 = 3;
   DEVICEDRIVER_IETF_NETWORK_TOPOLOGY = 4;
-  DEVICEDRIVER_ONF_TR_352 = 5;
+  DEVICEDRIVER_ONF_TR_532 = 5;
   DEVICEDRIVER_XR = 6;
   DEVICEDRIVER_IETF_L2VPN = 7;
   DEVICEDRIVER_GNMI_OPENCONFIG = 8;
@@ -611,42 +603,3 @@ message AuthenticationResult {
   ContextId context_id = 1;
   bool authenticated = 2;
 }
-
-// -----Inventory ------------------------------------------------------------------------------------------------------
-message InventoryId{
-  TopologyId topology_id = 1;
-  DeviceId device_id = 2;
-  Uuid inventory_uuid = 3;
-}
-
-message Inventory{
-  InventoryId inventory_id = 1;
-  string name = 2;
-  string inventory_type = 3;
-  repeated kpi_sample_types.KpiSampleType kpi_sample_types = 4;
-  string datos = 5;
-}
-
-message InventoryName {
-  InventoryId inventory_id = 1;
-  string device_name = 2;
-  string inventory_name = 3;
-  string inventory_type = 4;
-}
-
-message InventoryIdList {
-  repeated InventoryId inventory_ids = 1;
-}
-
-message InventoryNameList {
-  repeated InventoryName inventory_names = 1;
-}
-
-message InventoryList {
-  repeated Inventory inventory = 1;
-}
-
-message InventoryEvent {
-  Inventory event = 1;
-  InventoryId inventory_id = 2;
-}
\ No newline at end of file
diff --git a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java
index b0729aa55b25da030f9722330e22a0976a3d007f..3d5c93b2c60da6872bca019936eba5524191ad12 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java
@@ -849,8 +849,8 @@ public class Serializer {
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_P4;
             case IETF_NETWORK_TOPOLOGY:
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY;
-            case ONF_TR_352:
-                return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352;
+            case ONF_TR_532:
+                return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532;
             case XR:
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR;
             case IETF_L2VPN:
@@ -872,8 +872,8 @@ public class Serializer {
                 return DeviceDriverEnum.P4;
             case DEVICEDRIVER_IETF_NETWORK_TOPOLOGY:
                 return DeviceDriverEnum.IETF_NETWORK_TOPOLOGY;
-            case DEVICEDRIVER_ONF_TR_352:
-                return DeviceDriverEnum.ONF_TR_352;
+            case DEVICEDRIVER_ONF_TR_532:
+                return DeviceDriverEnum.ONF_TR_532;
             case DEVICEDRIVER_XR:
                 return DeviceDriverEnum.XR;
             case DEVICEDRIVER_IETF_L2VPN:
diff --git a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java
index 3a26937e79d0df2cfead305a10ccadf3c54eae89..c1e1491c93a6aaa8b094b1e65a556c3908a08dc0 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java
@@ -22,7 +22,7 @@ public enum DeviceDriverEnum {
     TRANSPORT_API,
     P4,
     IETF_NETWORK_TOPOLOGY,
-    ONF_TR_352,
+    ONF_TR_532,
     XR,
     IETF_L2VPN
 }
diff --git a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java
index d2257d1b34e4753aff620e9bbc15d941f99ae3ba..7351d16a90db438bad4535fd3d413c5c48e91b17 100644
--- a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java
+++ b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java
@@ -1212,8 +1212,8 @@ class SerializerTest {
                         DeviceDriverEnum.IETF_NETWORK_TOPOLOGY,
                         ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY),
                 Arguments.of(
-                        DeviceDriverEnum.ONF_TR_352,
-                        ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352),
+                        DeviceDriverEnum.ONF_TR_532,
+                        ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532),
                 Arguments.of(DeviceDriverEnum.XR, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR),
                 Arguments.of(
                         DeviceDriverEnum.IETF_L2VPN,
diff --git a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java
index a605e30de68ae66866a78d53863412937ceea890..be75963507751fbf5a8a1c92101de4023fd50e63 100644
--- a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java
+++ b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java
@@ -170,9 +170,9 @@ public final class ContextOuterClass {
      */
     DEVICEDRIVER_IETF_NETWORK_TOPOLOGY(4),
     /**
-     * <code>DEVICEDRIVER_ONF_TR_352 = 5;</code>
+     * <code>DEVICEDRIVER_ONF_TR_532 = 5;</code>
      */
-    DEVICEDRIVER_ONF_TR_352(5),
+    DEVICEDRIVER_ONF_TR_532(5),
     /**
      * <code>DEVICEDRIVER_XR = 6;</code>
      */
@@ -213,9 +213,9 @@ public final class ContextOuterClass {
      */
     public static final int DEVICEDRIVER_IETF_NETWORK_TOPOLOGY_VALUE = 4;
     /**
-     * <code>DEVICEDRIVER_ONF_TR_352 = 5;</code>
+     * <code>DEVICEDRIVER_ONF_TR_532 = 5;</code>
      */
-    public static final int DEVICEDRIVER_ONF_TR_352_VALUE = 5;
+    public static final int DEVICEDRIVER_ONF_TR_532_VALUE = 5;
     /**
      * <code>DEVICEDRIVER_XR = 6;</code>
      */
@@ -259,7 +259,7 @@ public final class ContextOuterClass {
         case 2: return DEVICEDRIVER_TRANSPORT_API;
         case 3: return DEVICEDRIVER_P4;
         case 4: return DEVICEDRIVER_IETF_NETWORK_TOPOLOGY;
-        case 5: return DEVICEDRIVER_ONF_TR_352;
+        case 5: return DEVICEDRIVER_ONF_TR_532;
         case 6: return DEVICEDRIVER_XR;
         case 7: return DEVICEDRIVER_IETF_L2VPN;
         case 8: return DEVICEDRIVER_GNMI_OPENCONFIG;
@@ -74215,7 +74215,7 @@ public final class ContextOuterClass {
       "RIVER_OPENCONFIG\020\001\022\036\n\032DEVICEDRIVER_TRANS" +
       "PORT_API\020\002\022\023\n\017DEVICEDRIVER_P4\020\003\022&\n\"DEVIC" +
       "EDRIVER_IETF_NETWORK_TOPOLOGY\020\004\022\033\n\027DEVIC" +
-      "EDRIVER_ONF_TR_352\020\005\022\023\n\017DEVICEDRIVER_XR\020" +
+      "EDRIVER_ONF_TR_532\020\005\022\023\n\017DEVICEDRIVER_XR\020" +
       "\006\022\033\n\027DEVICEDRIVER_IETF_L2VPN\020\007\022 \n\034DEVICE" +
       "DRIVER_GNMI_OPENCONFIG\020\010*\217\001\n\033DeviceOpera" +
       "tionalStatusEnum\022%\n!DEVICEOPERATIONALSTA" +
diff --git a/src/common/message_broker/backend/nats/NatsBackendThread.py b/src/common/message_broker/backend/nats/NatsBackendThread.py
index 0bedd2b242f7eeaa1585d0eb41c5a0bd9efe07e5..3ac32b0cb8f7b4be2d693753e39919b82ab3948f 100644
--- a/src/common/message_broker/backend/nats/NatsBackendThread.py
+++ b/src/common/message_broker/backend/nats/NatsBackendThread.py
@@ -49,29 +49,41 @@ class NatsBackendThread(threading.Thread):
         self._publish_queue.put_nowait(Message(topic_name, message_content))
 
     async def _run_subscriber(
-        self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event
+        self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event,
+        ready_event : threading.Event
     ) -> None:
-        LOGGER.info('[_run_subscriber] NATS URI: {:s}'.format(str(self._nats_uri)))
-        client = await nats.connect(servers=[self._nats_uri])
-        LOGGER.info('[_run_subscriber] Connected!')
-        subscription = await client.subscribe(topic_name)
-        LOGGER.info('[_run_subscriber] Subscribed!')
-        while not self._terminate.is_set() and not unsubscribe.is_set():
-            try:
-                message = await subscription.next_msg(timeout)
-            except nats.errors.TimeoutError:
-                continue
-            except asyncio.CancelledError:
-                break
-            out_queue.put(Message(message.subject, message.data.decode('UTF-8')))
-        await subscription.unsubscribe()
-        await client.drain()
+        try:
+            LOGGER.info('[_run_subscriber] NATS URI: {:s}'.format(str(self._nats_uri)))
+            client = await nats.connect(servers=[self._nats_uri])
+            server_version = client.connected_server_version
+            LOGGER.info('[_run_subscriber] Connected! NATS Server version: {:s}'.format(str(repr(server_version))))
+            subscription = await client.subscribe(topic_name)
+            LOGGER.info('[_run_subscriber] Subscribed!')
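+            # Signal the spawning thread that the subscription is established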
+            ready_event.set()
+            while not self._terminate.is_set() and not unsubscribe.is_set():
+                try:
+                    message = await subscription.next_msg(timeout)
+                except nats.errors.TimeoutError:
+                    continue
+                except asyncio.CancelledError:
+                    break
+                out_queue.put(Message(message.subject, message.data.decode('UTF-8')))
+            await subscription.unsubscribe()
+            await client.drain()
+        except Exception:   # pylint: disable=broad-exception-caught
+            LOGGER.exception('[_run_subscriber] Unhandled Exception')
 
     def subscribe(
         self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event
     ) -> None:
-        task = self._event_loop.create_task(self._run_subscriber(topic_name, timeout, out_queue, unsubscribe))
+        ready_event = threading.Event()
+        task = self._event_loop.create_task(
+            self._run_subscriber(topic_name, timeout, out_queue, unsubscribe, ready_event)
+        )
         self._tasks.append(task)
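+        # Block until the subscriber coroutine is connected and subscribed (up to 120s);
+        # messages published before the subscription is active would otherwise be missed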
+        LOGGER.info('[subscribe] Waiting for subscriber to be ready...')
+        is_ready = ready_event.wait(timeout=120)
+        LOGGER.info('[subscribe] Subscriber is Ready? {:s}'.format(str(is_ready)))
 
     def run(self) -> None:
         asyncio.set_event_loop(self._event_loop)
diff --git a/src/common/tests/MockMessageBroker.py b/src/common/tests/MockMessageBroker.py
index 2eeea74cfc85fd360180c77a2d5b7387d2ef092f..27613a64d3f63a55276bb1c1f82fdb6c20a8e534 100644
--- a/src/common/tests/MockMessageBroker.py
+++ b/src/common/tests/MockMessageBroker.py
@@ -28,10 +28,6 @@ TOPIC_SERVICE    = 'service'
 TOPIC_SLICE      = 'slice'
 TOPIC_TOPOLOGY   = 'topology'
 
-TOPICS = {
-    TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY
-}
-
 CONSUME_TIMEOUT = 0.5 # seconds
 
 class Message(NamedTuple):
diff --git a/src/common/tests/MockServicerImpl_Context.py b/src/common/tests/MockServicerImpl_Context.py
index 667c9ed658cfbe648e345d691523375e1c5f8b79..e5d8ea76d25a81303df5a8e14073e1dcdc103ef0 100644
--- a/src/common/tests/MockServicerImpl_Context.py
+++ b/src/common/tests/MockServicerImpl_Context.py
@@ -263,7 +263,7 @@ class MockServicerImpl_Context(ContextServiceServicer):
             reply_device.CopyFrom(device)
             if exclude_endpoints:    del reply_device.device_endpoints [:] # pylint: disable=no-member
             if exclude_config_rules: del reply_device.device_config.config_rules[:] # pylint: disable=no-member
-            if exclude_components:   del reply_device.component[:] # pylint: disable=no-member
+            if exclude_components:   del reply_device.components[:] # pylint: disable=no-member
             devices.append(reply_device)
                 
         reply = DeviceList(devices=devices) 
diff --git a/src/common/tools/context_queries/Topology.py b/src/common/tools/context_queries/Topology.py
index 15217b8d14fec0137d94aa704e3cd8cb096f4a17..caf03ed0eb5271aa6e00a2c107a06f9e496d37dc 100644
--- a/src/common/tools/context_queries/Topology.py
+++ b/src/common/tools/context_queries/Topology.py
@@ -15,7 +15,7 @@
 import grpc, logging
 from typing import List, Optional
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId, Topology, TopologyId
+from common.proto.context_pb2 import ContextId, Topology, TopologyDetails, TopologyId
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology
 from context.client.ContextClient import ContextClient
@@ -23,13 +23,13 @@ from context.client.ContextClient import ContextClient
 LOGGER = logging.getLogger(__name__)
 
 def create_topology(
-    context_client : ContextClient, context_uuid : str, topology_uuid : str
+    context_client : ContextClient, context_uuid : str, topology_uuid : str, name : Optional[str] = None
 ) -> None:
     context_id = ContextId(**json_context_id(context_uuid))
     existing_topology_ids = context_client.ListTopologyIds(context_id)
     existing_topology_uuids = {topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids}
     if topology_uuid in existing_topology_uuids: return
-    context_client.SetTopology(Topology(**json_topology(topology_uuid, context_id=context_id)))
+    context_client.SetTopology(Topology(**json_topology(topology_uuid, context_id=context_id, name=name)))
 
 def create_missing_topologies(
     context_client : ContextClient, context_id : ContextId, topology_uuids : List[str]
@@ -61,3 +61,21 @@ def get_topology(
     except grpc.RpcError:
         #LOGGER.exception('Unable to get topology({:s} / {:s})'.format(str(context_uuid), str(topology_uuid)))
         return None
+
+def get_topology_details(
+        context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME,
+        rw_copy : bool = False
+    ) -> Optional[TopologyDetails]:
+    try:
+        # pylint: disable=no-member
+        topology_id = TopologyId()
+        topology_id.context_id.context_uuid.uuid = context_uuid
+        topology_id.topology_uuid.uuid = topology_uuid
+        ro_topology_details = context_client.GetTopologyDetails(topology_id)
+        if not rw_copy: return ro_topology_details
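+        # Return a mutable copy when the caller needs to modify the details without touching the original response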
+        rw_topology_details = TopologyDetails()
+        rw_topology_details.CopyFrom(ro_topology_details)
+        return rw_topology_details
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get topology({:s} / {:s})'.format(str(context_uuid), str(topology_uuid)))
+        return None
diff --git a/src/common/type_checkers/Assertions.py b/src/common/type_checkers/Assertions.py
index 25eb42fabf9c670256e9079a060aa38deb3c0f3d..42ea864f3c0c1150c3806f97e67ff3969542ab70 100644
--- a/src/common/type_checkers/Assertions.py
+++ b/src/common/type_checkers/Assertions.py
@@ -31,7 +31,7 @@ def validate_device_driver_enum(message):
         'DEVICEDRIVER_TRANSPORT_API',
         'DEVICEDRIVER_P4',
         'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY',
-        'DEVICEDRIVER_ONF_TR_352',
+        'DEVICEDRIVER_ONF_TR_532',
         'DEVICEDRIVER_XR',
         'DEVICEDRIVER_IETF_L2VPN',
         'DEVICEDRIVER_GNMI_OPENCONFIG',
diff --git a/src/compute/requirements.in b/src/compute/requirements.in
index 08bbf281a385652013adda02a97e64df7a366c43..dc22f64eb79778f89d1e66d9718dfed2c15457a3 100644
--- a/src/compute/requirements.in
+++ b/src/compute/requirements.in
@@ -17,3 +17,4 @@ Flask-HTTPAuth==4.5.0
 Flask-RESTful==0.3.9
 jsonschema==4.4.0
 requests==2.27.1
+werkzeug==2.3.7
\ No newline at end of file
diff --git a/src/compute/service/__main__.py b/src/compute/service/__main__.py
index 6c744d0dcef67fef1d8ac719eaba9420b530fe58..a9f224e152429624b6b5ce1808f1960a089c08f4 100644
--- a/src/compute/service/__main__.py
+++ b/src/compute/service/__main__.py
@@ -21,6 +21,7 @@ from common.Settings import (
 from .ComputeService import ComputeService
 from .rest_server.RestServer import RestServer
 from .rest_server.nbi_plugins.debug_api import register_debug_api
+from .rest_server.nbi_plugins.etsi_bwm import register_etsi_bwm_api
 from .rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn
 from .rest_server.nbi_plugins.ietf_network_slice import register_ietf_nss
 
@@ -59,9 +60,10 @@ def main():
     grpc_service.start()
 
     rest_server = RestServer()
+    register_debug_api(rest_server)
+    register_etsi_bwm_api(rest_server)
     register_ietf_l2vpn(rest_server)  # Registering L2VPN entrypoint
     register_ietf_nss(rest_server)  # Registering NSS entrypoint
-    register_debug_api(rest_server)
     rest_server.start()
 
     # Wait for Ctrl+C or termination signal
diff --git a/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py b/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py
index ffdbba88f077f6490261372f7048b2e2526d8196..5573b7b026b18715f31a91a052c1a5b15c97a5f0 100644
--- a/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py
+++ b/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py
@@ -12,17 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from flask_restful import Resource
+from flask_restful import Resource, request
 from common.proto.context_pb2 import Empty
 from context.client.ContextClient import ContextClient
+from service.client.ServiceClient import ServiceClient
 from .Tools import (
     format_grpc_to_json, grpc_connection_id, grpc_context_id, grpc_device_id, grpc_link_id, grpc_policy_rule_id,
-    grpc_service_id, grpc_slice_id, grpc_topology_id)
+    grpc_service_id, grpc_service, grpc_slice_id, grpc_topology_id)
 
 class _Resource(Resource):
     def __init__(self) -> None:
         super().__init__()
         self.client = ContextClient()
+        self.service_client = ServiceClient()
 
 class ContextIds(_Resource):
     def get(self):
@@ -60,6 +62,31 @@ class Service(_Resource):
     def get(self, context_uuid : str, service_uuid : str):
         return format_grpc_to_json(self.client.GetService(grpc_service_id(context_uuid, service_uuid)))
 
+    def post(self, context_uuid : str, service_uuid : str):
+        service = request.get_json()['services'][0]
+        return format_grpc_to_json(self.service_client.CreateService(grpc_service(
+            service_uuid = service['service_id']['service_uuid']['uuid'],
+            service_type = service['service_type'],
+            context_uuid = service['service_id']['context_id']['context_uuid']['uuid'],
+        )))
+
+    def put(self, context_uuid : str, service_uuid : str):
+        service = request.get_json()['services'][0]
+        return format_grpc_to_json(self.service_client.UpdateService(grpc_service(
+            service_uuid = service['service_id']['service_uuid']['uuid'],
+            service_type = service['service_type'],
+            context_uuid = service['service_id']['context_id']['context_uuid']['uuid'],
+            status       = service['service_status']['service_status'],
+            endpoint_ids = service['service_endpoint_ids'],
+            constraints  = service['service_constraints'],
+            config_rules = service['service_config']['config_rules']
+        )))
+
+    def delete(self, context_uuid : str, service_uuid : str):
+        return format_grpc_to_json(self.service_client.DeleteService(grpc_service_id(
+            context_uuid, service_uuid,
+        )))
+
 class SliceIds(_Resource):
     def get(self, context_uuid : str):
         return format_grpc_to_json(self.client.ListSliceIds(grpc_context_id(context_uuid)))
diff --git a/src/compute/service/rest_server/nbi_plugins/debug_api/Tools.py b/src/compute/service/rest_server/nbi_plugins/debug_api/Tools.py
index f3dff545ba9812ff3f4e13c3da53774af7626014..fd5eb2316d44f4f13e6d8bedef7411beee80c46a 100644
--- a/src/compute/service/rest_server/nbi_plugins/debug_api/Tools.py
+++ b/src/compute/service/rest_server/nbi_plugins/debug_api/Tools.py
@@ -13,15 +13,20 @@
 # limitations under the License.
 
 from flask.json import jsonify
-from common.proto.context_pb2 import ConnectionId, ContextId, DeviceId, LinkId, ServiceId, SliceId, TopologyId
+from common.proto.context_pb2 import (
+    ConnectionId, ContextId, DeviceId, LinkId, ServiceId, SliceId, TopologyId, Service, ServiceStatusEnum
+)
 from common.proto.policy_pb2 import PolicyRuleId
 from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.Connection import json_connection_id
 from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.ConfigRule import json_config_rule
+from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.EndPoint import json_endpoint_id
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.Link import json_link_id
 from common.tools.object_factory.PolicyRule import json_policyrule_id
-from common.tools.object_factory.Service import json_service_id
+from common.tools.object_factory.Service import json_service_id, json_service
 from common.tools.object_factory.Slice import json_slice_id
 from common.tools.object_factory.Topology import json_topology_id
 
@@ -44,6 +49,37 @@ def grpc_link_id(link_uuid):
 def grpc_service_id(context_uuid, service_uuid):
     return ServiceId(**json_service_id(service_uuid, context_id=json_context_id(context_uuid)))
 
+def grpc_service(
+    service_uuid, service_type, context_uuid, status=None, endpoint_ids=None, constraints=None, config_rules=None
+):
+    json_context = json_context_id(context_uuid)
+    json_status = status if status else ServiceStatusEnum.SERVICESTATUS_PLANNED
+    json_endpoints_ids = [
+        json_endpoint_id(
+            json_device_id(endpoint_id['device_id']['device_uuid']['uuid']),
+            endpoint_id['endpoint_uuid']['uuid']
+        )
+        for endpoint_id in endpoint_ids
+    ] if endpoint_ids else []
+    json_constraints = [
+        json_constraint_custom(
+            constraint['custom']['constraint_type'],
+            constraint['custom']['constraint_value']
+        )
+        for constraint in constraints
+    ] if constraints else []
+    json_config_rules = [
+        json_config_rule(
+            config_rule['action'],
+            config_rule['custom']['resource_key'],
+            config_rule['custom']['resource_value']
+        )
+        for config_rule in config_rules
+    ] if config_rules else []
+    return Service(**json_service(
+        service_uuid, service_type, json_context, json_status,
+        json_endpoints_ids, json_constraints, json_config_rules))
+
 def grpc_slice_id(context_uuid, slice_uuid):
     return SliceId(**json_slice_id(slice_uuid, context_id=json_context_id(context_uuid)))
     
diff --git a/src/compute/service/rest_server/nbi_plugins/etsi_bwm/Resources.py b/src/compute/service/rest_server/nbi_plugins/etsi_bwm/Resources.py
new file mode 100644
index 0000000000000000000000000000000000000000..38534b754d6c1ac502b21af55375c63159c57745
--- /dev/null
+++ b/src/compute/service/rest_server/nbi_plugins/etsi_bwm/Resources.py
@@ -0,0 +1,75 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+from common.Constants import DEFAULT_CONTEXT_NAME
+from flask_restful import Resource, request
+from context.client.ContextClient import ContextClient
+from service.client.ServiceClient import ServiceClient
+from .Tools import (
+    format_grpc_to_json, grpc_context_id, grpc_service_id, bwInfo_2_service, service_2_bwInfo)
+
+
+class _Resource(Resource):
+    def __init__(self) -> None:
+        super().__init__()
+        self.client = ContextClient()
+        self.service_client = ServiceClient()
+
+
+class BwInfo(_Resource):
+    def get(self):
+        service_list = self.client.ListServices(grpc_context_id(DEFAULT_CONTEXT_NAME))
+        bw_allocations = [service_2_bwInfo(service) for service in service_list.services]
+        return bw_allocations
+
+    def post(self):
+        bwinfo = request.get_json()
+        service = bwInfo_2_service(self.client, bwinfo)
+        stripped_service = copy.deepcopy(service)
+
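+        # Two-step creation: register a bare service first, then apply endpoints, constraints and config rules via UpdateService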
+        stripped_service.ClearField('service_endpoint_ids')
+        stripped_service.ClearField('service_constraints')
+        stripped_service.ClearField('service_config')
+
+        self.service_client.CreateService(stripped_service)
+        response = format_grpc_to_json(self.service_client.UpdateService(service))
+
+        return response
+
+
+class BwInfoId(_Resource):
+
+    def get(self, allocationId: str):
+        service = self.client.GetService(grpc_service_id(DEFAULT_CONTEXT_NAME, allocationId))
+        return service_2_bwInfo(service)
+
+    def put(self, allocationId: str):
+        json_data = request.get_json()
+        service = bwInfo_2_service(self.client, json_data)
+        response = self.service_client.UpdateService(service)
+        return format_grpc_to_json(response)
+
+    def patch(self, allocationId: str):
+        json_data = request.get_json()
+        if 'appInsId' not in json_data:
+            json_data['appInsId'] = allocationId
+        service = bwInfo_2_service(self.client, json_data)
+        response = self.service_client.UpdateService(service)
+        return format_grpc_to_json(response)
+
+    def delete(self, allocationId: str):
+        self.service_client.DeleteService(grpc_service_id(DEFAULT_CONTEXT_NAME, allocationId))
+        return
diff --git a/src/compute/service/rest_server/nbi_plugins/etsi_bwm/Tools.py b/src/compute/service/rest_server/nbi_plugins/etsi_bwm/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..023d1006cd807ffeeed40d2e1e7273a580431073
--- /dev/null
+++ b/src/compute/service/rest_server/nbi_plugins/etsi_bwm/Tools.py
@@ -0,0 +1,114 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import logging
+import time
+from flask.json import jsonify
+from common.proto.context_pb2 import ContextId, Empty, EndPointId, ServiceId, ServiceTypeEnum, Service, Constraint, Constraint_SLA_Capacity, ConfigRule, ConfigRule_Custom, ConfigActionEnum
+from common.tools.grpc.Tools import grpc_message_to_json
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Service import json_service_id
+
+LOGGER = logging.getLogger(__name__)
+
+
+def service_2_bwInfo(service: Service) -> dict:
+    response = {}
+    # allocationDirection = '??' # String: 00 = Downlink (towards the UE); 01 = Uplink (towards the application/session); 10 = Symmetrical
+    response['appInsId'] = service.service_id.context_id.context_uuid.uuid # String: Application instance identifier
+    for constraint in service.service_constraints:
+        if constraint.WhichOneof('constraint') == 'sla_capacity':
+            response['fixedAllocation'] = str(constraint.sla_capacity.capacity_gbps*1000) # String: Size of requested fixed BW allocation in [bps]
+            break
+
+    for config_rule in service.service_config.config_rules:
+        key = config_rule.custom.resource_key
+        if key in {'allocationDirection', 'fixedBWPriority', 'requestType', 'sourceIp', 'sourcePort', 'dstPort', 'protocol'}:
+            response[key] = config_rule.custom.resource_value
+        elif key == 'sessionFilter':
+            response[key] = json.loads(config_rule.custom.resource_value)
+
+    unixtime = time.time()
+    response['timeStamp'] = { # Time stamp to indicate when the corresponding information elements are sent
+        "seconds": int(unixtime),
+        "nanoseconds": int(unixtime%1*1e9)
+    }
+
+    return response
+
+def bwInfo_2_service(client, bwInfo: dict) -> Service:
+    service = Service()
+
+    for key in ['allocationDirection', 'fixedBWPriority', 'requestType', 'timeStamp', 'sessionFilter']:
+        if key not in bwInfo:
+            continue
+        config_rule = ConfigRule()
+        config_rule.action = ConfigActionEnum.CONFIGACTION_SET
+        config_rule_custom = ConfigRule_Custom()
+        config_rule_custom.resource_key  = key
+        if key != 'sessionFilter':
+            config_rule_custom.resource_value  = str(bwInfo[key])
+        else:
+            config_rule_custom.resource_value  = json.dumps(bwInfo[key])
+        config_rule.custom.CopyFrom(config_rule_custom)
+        service.service_config.config_rules.append(config_rule)
+
+    if 'sessionFilter' in bwInfo:
+        a_ip = bwInfo['sessionFilter'][0]['sourceIp']
+        z_ip = bwInfo['sessionFilter'][0]['dstAddress']
+
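+        # Resolve service endpoints by matching the session-filter IPs against endpoint IPs
+        # advertised in each device's '_connect/settings' config rule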
+        devices = client.ListDevices(Empty()).devices
+        for device in devices:
+            for cr in device.device_config.config_rules:
+                if cr.WhichOneof('config_rule') == 'custom' and cr.custom.resource_key == '_connect/settings':
+                    for ep in json.loads(cr.custom.resource_value)['endpoints']:
+                        if 'ip' in ep and (ep['ip'] == a_ip or ep['ip'] == z_ip):
+                            ep_id = EndPointId()
+                            ep_id.endpoint_uuid.uuid = ep['uuid']
+                            ep_id.device_id.device_uuid.uuid = device.device_id.device_uuid.uuid
+                            service.service_endpoint_ids.append(ep_id)
+
+        if len(service.service_endpoint_ids) < 2:
+            LOGGER.error('No endpoints matched')
+            return None
+
+    service.service_type = ServiceTypeEnum.SERVICETYPE_L3NM
+
+    if 'appInsId' in bwInfo:
+        service.service_id.service_uuid.uuid = bwInfo['appInsId']
+        service.service_id.context_id.context_uuid.uuid = 'admin'
+        service.name = bwInfo['appInsId']
+
+    if 'fixedAllocation' in bwInfo:
+        capacity = Constraint_SLA_Capacity()
+        capacity.capacity_gbps = float(bwInfo['fixedAllocation'])
+        constraint = Constraint()
+        constraint.sla_capacity.CopyFrom(capacity)
+        service.service_constraints.append(constraint)
+
+    return service
+
+
+def format_grpc_to_json(grpc_reply):
+    return jsonify(grpc_message_to_json(grpc_reply))
+
+def grpc_context_id(context_uuid):
+    return ContextId(**json_context_id(context_uuid))
+
+def grpc_service_id(context_uuid, service_uuid):
+    return ServiceId(**json_service_id(service_uuid, context_id=json_context_id(context_uuid)))
diff --git a/src/compute/service/rest_server/nbi_plugins/etsi_bwm/__init__.py b/src/compute/service/rest_server/nbi_plugins/etsi_bwm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b81a4057a91891bc86bacd34d157a1a61e14934
--- /dev/null
+++ b/src/compute/service/rest_server/nbi_plugins/etsi_bwm/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from compute.service.rest_server.RestServer import RestServer
+from .Resources import BwInfo, BwInfoId
+
+URL_PREFIX = '/bwm/v1'
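+# NBI plugins are mounted under '/restconf', so the effective base path is '/restconf/bwm/v1' (see tests_etsi_bwm.txt)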
+
+# Use the 'path' converter since some identifiers may contain '/', which Flask's 'string' converter cannot match.
+RESOURCES = [
+    # (endpoint_name, resource_class, resource_url)
+    ('api.bw_info',         BwInfo,     '/bw_allocations'),
+    ('api.bw_info_id',      BwInfoId,   '/bw_allocations/<path:allocationId>'),
+]
+
+def register_etsi_bwm_api(rest_server : RestServer):
+    for endpoint_name, resource_class, resource_url in RESOURCES:
+        rest_server.add_resource(resource_class, URL_PREFIX + resource_url, endpoint=endpoint_name)
diff --git a/src/compute/service/rest_server/nbi_plugins/etsi_bwm/tests_etsi_bwm.txt b/src/compute/service/rest_server/nbi_plugins/etsi_bwm/tests_etsi_bwm.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9cfbe5625de45a044e68a8af8ec8f440423da966
--- /dev/null
+++ b/src/compute/service/rest_server/nbi_plugins/etsi_bwm/tests_etsi_bwm.txt
@@ -0,0 +1,81 @@
+-----------------------GET-----------------------
+
+curl --request GET \
+  --url http://10.1.7.203:80/restconf/bwm/v1/bw_allocations
+
+
+-----------------------POST-----------------------
+curl --request POST \
+  --url http://10.1.7.203:80/restconf/bwm/v1/bw_allocations \
+  --header 'Content-Type: application/json' \
+  --data '{
+  "allocationDirection": "string",
+  "appInsId": "service_uuid",
+  "fixedAllocation": "123",
+  "fixedBWPriority": "SEE_DESCRIPTION",
+  "requestType": 0,
+  "sessionFilter": [
+    {
+      "dstAddress": "192.168.3.2",
+      "dstPort": [
+        "b"
+      ],
+      "protocol": "string",
+      "sourceIp": "192.168.1.2",
+      "sourcePort": [
+        "a"
+      ]
+    }
+  ],
+  "timeStamp": {
+    "nanoSeconds": 1,
+    "seconds": 1
+  }
+}'
+
+
+-----------------------GET2-----------------------
+curl --request GET \
+  --url http://10.1.7.203:80/restconf/bwm/v1/bw_allocations/service_uuid
+
+-----------------------PUT-----------------------
+  curl --request PUT \
+  --url http://10.1.7.203:80/restconf/bwm/v1/bw_allocations/service_uuid \
+  --header 'Content-Type: application/json' \
+  --data '{
+  "allocationDirection": "string",
+  "appInsId": "service_uuid",
+  "fixedAllocation": "123",
+  "fixedBWPriority": "efefe",
+  "requestType": 0,
+  "sessionFilter": [
+    {
+      "dstAddress": "192.168.3.2",
+      "dstPort": [
+        "b"
+      ],
+      "protocol": "string",
+      "sourceIp": "192.168.1.2",
+      "sourcePort": [
+        "a"
+      ]
+    }
+  ],
+  "timeStamp": {
+    "nanoSeconds": 1,
+    "seconds": 1
+  }
+}'
+
+-----------------------PATCH-----------------------
+curl --request PATCH \
+  --url http://10.1.7.203:80/restconf/bwm/v1/bw_allocations/service_uuid \
+  --header 'Content-Type: application/json' \
+  --data '{
+  "fixedBWPriority": "uuuuuuuuuuuuuu"
+}'
+
+
+-----------------------DELETE-----------------------
+curl --request DELETE \
+  --url http://10.1.7.203:80/restconf/bwm/v1/bw_allocations/service_uuid
\ No newline at end of file
diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py
index 6d540b4945df8516697c957316294a452186ddb1..93f078e75545c93a2cd312cf48e8f64cdeea87ac 100644
--- a/src/context/service/ContextServiceServicerImpl.py
+++ b/src/context/service/ContextServiceServicerImpl.py
@@ -12,14 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, json, logging, sqlalchemy
+import grpc, logging, sqlalchemy
 from typing import Iterator
 from common.message_broker.MessageBroker import MessageBroker
 from common.proto.context_pb2 import (
     Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
     Context, ContextEvent, ContextId, ContextIdList, ContextList,
     Device, DeviceEvent, DeviceFilter, DeviceId, DeviceIdList, DeviceList,
-    Empty, EndPointIdList, EndPointNameList, EventTypeEnum,
+    Empty, EndPointIdList, EndPointNameList,
     Link, LinkEvent, LinkId, LinkIdList, LinkList,
     Service, ServiceEvent, ServiceFilter, ServiceId, ServiceIdList, ServiceList,
     Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
@@ -33,16 +33,16 @@ from .database.Connection import (
 from .database.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set
 from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_select, device_set
 from .database.EndPoint import endpoint_list_names
+from .database.Events import EventTopicEnum, consume_events
 from .database.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set
 from .database.PolicyRule import (
     policyrule_delete, policyrule_get, policyrule_list_ids, policyrule_list_objs, policyrule_set)
-from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_select, service_set, service_unset
-from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_select, slice_set, slice_unset
+from .database.Service import (
+    service_delete, service_get, service_list_ids, service_list_objs, service_select, service_set, service_unset)
+from .database.Slice import (
+    slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_select, slice_set, slice_unset)
 from .database.Topology import (
     topology_delete, topology_get, topology_get_details, topology_list_ids, topology_list_objs, topology_set)
-from .Events import (
-    CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE,
-    TOPIC_SLICE, TOPIC_TOPOLOGY, notify_event)
 
 LOGGER = logging.getLogger(__name__)
 
@@ -62,308 +62,237 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListContextIds(self, request : Empty, context : grpc.ServicerContext) -> ContextIdList:
-        return ContextIdList(context_ids=context_list_ids(self.db_engine))
+        return context_list_ids(self.db_engine)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListContexts(self, request : Empty, context : grpc.ServicerContext) -> ContextList:
-        return ContextList(contexts=context_list_objs(self.db_engine))
+        return context_list_objs(self.db_engine)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetContext(self, request : ContextId, context : grpc.ServicerContext) -> Context:
-        return Context(**context_get(self.db_engine, request))
+        return context_get(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetContext(self, request : Context, context : grpc.ServicerContext) -> ContextId:
-        context_id,updated = context_set(self.db_engine, request)
-        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-        notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': context_id})
-        return ContextId(**context_id)
+        return context_set(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def RemoveContext(self, request : ContextId, context : grpc.ServicerContext) -> Empty:
-        context_id,deleted = context_delete(self.db_engine, request)
-        if deleted:
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': context_id})
-        return Empty()
+        return context_delete(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetContextEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]:
-        for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT):
-            yield ContextEvent(**json.loads(message.content))
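+        # consume_events centralizes topic subscription and event-message decoding for all event streams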
+        for message in consume_events(self.messagebroker, {EventTopicEnum.CONTEXT}): yield message
 
 
     # ----- Topology ---------------------------------------------------------------------------------------------------
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListTopologyIds(self, request : ContextId, context : grpc.ServicerContext) -> TopologyIdList:
-        return TopologyIdList(topology_ids=topology_list_ids(self.db_engine, request))
+        return topology_list_ids(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListTopologies(self, request : ContextId, context : grpc.ServicerContext) -> TopologyList:
-        return TopologyList(topologies=topology_list_objs(self.db_engine, request))
+        return topology_list_objs(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Topology:
-        return Topology(**topology_get(self.db_engine, request))
+        return topology_get(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetTopologyDetails(self, request : TopologyId, context : grpc.ServicerContext) -> TopologyDetails:
-        return TopologyDetails(**topology_get_details(self.db_engine, request))
+        return topology_get_details(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetTopology(self, request : Topology, context : grpc.ServicerContext) -> TopologyId:
-        topology_id,updated = topology_set(self.db_engine, request)
-        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-        notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': topology_id})
-        return TopologyId(**topology_id)
+        return topology_set(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def RemoveTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
-        topology_id,deleted = topology_delete(self.db_engine, request)
-        if deleted:
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': topology_id})
-        return Empty()
+        return topology_delete(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetTopologyEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]:
-        for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT):
-            yield TopologyEvent(**json.loads(message.content))
+        for message in consume_events(self.messagebroker, {EventTopicEnum.TOPOLOGY}): yield message
 
 
     # ----- Device -----------------------------------------------------------------------------------------------------
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListDeviceIds(self, request : Empty, context : grpc.ServicerContext) -> DeviceIdList:
-        return DeviceIdList(device_ids=device_list_ids(self.db_engine))
+        return device_list_ids(self.db_engine)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListDevices(self, request : Empty, context : grpc.ServicerContext) -> DeviceList:
-        return DeviceList(devices=device_list_objs(self.db_engine))
+        return device_list_objs(self.db_engine)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetDevice(self, request : ContextId, context : grpc.ServicerContext) -> Device:
-        return Device(**device_get(self.db_engine, request))
+        return device_get(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId:
-        device_id,updated = device_set(self.db_engine, request)
-        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-        notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id})
-        return DeviceId(**device_id)
+        return device_set(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def RemoveDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty:
-        device_id,deleted = device_delete(self.db_engine, request)
-        if deleted:
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id})
-        return Empty()
+        return device_delete(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SelectDevice(self, request : DeviceFilter, context : grpc.ServicerContext) -> DeviceList:
-        return DeviceList(devices=device_select(self.db_engine, request))
+        return device_select(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetDeviceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
-        for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT):
-            yield DeviceEvent(**json.loads(message.content))
+        yield from consume_events(self.messagebroker, {EventTopicEnum.DEVICE})
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListEndPointNames(self, request : EndPointIdList, context : grpc.ServicerContext) -> EndPointNameList:
-        return EndPointNameList(endpoint_names=endpoint_list_names(self.db_engine, request))
+        return endpoint_list_names(self.db_engine, request)
 
 
     # ----- Link -------------------------------------------------------------------------------------------------------
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList:
-        return LinkIdList(link_ids=link_list_ids(self.db_engine))
+        return link_list_ids(self.db_engine)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList:
-        return LinkList(links=link_list_objs(self.db_engine))
+        return link_list_objs(self.db_engine)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link:
-        return Link(**link_get(self.db_engine, request))
+        return link_get(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId:
-        link_id,updated = link_set(self.db_engine, request)
-        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-        notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id})
-        return LinkId(**link_id)
+        return link_set(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty:
-        link_id,deleted = link_delete(self.db_engine, request)
-        if deleted:
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id})
-        return Empty()
+        return link_delete(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetLinkEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]:
-        for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT):
-            yield LinkEvent(**json.loads(message.content))
+        yield from consume_events(self.messagebroker, {EventTopicEnum.LINK})
 
 
     # ----- Service ----------------------------------------------------------------------------------------------------
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListServiceIds(self, request : ContextId, context : grpc.ServicerContext) -> ServiceIdList:
-        return ServiceIdList(service_ids=service_list_ids(self.db_engine, request))
+        return service_list_ids(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListServices(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList:
-        return ServiceList(services=service_list_objs(self.db_engine, request))
+        return service_list_objs(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetService(self, request : ServiceId, context : grpc.ServicerContext) -> Service:
-        return Service(**service_get(self.db_engine, request))
+        return service_get(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId:
-        service_id,updated = service_set(self.db_engine, request)
-        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-        notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id})
-        return ServiceId(**service_id)
+        return service_set(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def UnsetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId:
-        service_id,updated = service_unset(self.db_engine, request)
-        if updated:
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE
-            notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id})
-        return ServiceId(**service_id)
+        return service_unset(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def RemoveService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty:
-        service_id,deleted = service_delete(self.db_engine, request)
-        if deleted:
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id})
-        return Empty()
+        return service_delete(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SelectService(self, request : ServiceFilter, context : grpc.ServicerContext) -> ServiceList:
-        return ServiceList(services=service_select(self.db_engine, request))
+        return service_select(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
-        for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT):
-            yield ServiceEvent(**json.loads(message.content))
+        yield from consume_events(self.messagebroker, {EventTopicEnum.SERVICE})
 
 
     # ----- Slice ----------------------------------------------------------------------------------------------------
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListSliceIds(self, request : ContextId, context : grpc.ServicerContext) -> SliceIdList:
-        return SliceIdList(slice_ids=slice_list_ids(self.db_engine, request))
+        return slice_list_ids(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListSlices(self, request : ContextId, context : grpc.ServicerContext) -> SliceList:
-        return SliceList(slices=slice_list_objs(self.db_engine, request))
+        return slice_list_objs(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetSlice(self, request : SliceId, context : grpc.ServicerContext) -> Slice:
-        return Slice(**slice_get(self.db_engine, request))
+        return slice_get(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
-        slice_id,updated = slice_set(self.db_engine, request)
-        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-        notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id})
-        return SliceId(**slice_id)
+        return slice_set(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def UnsetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
-        slice_id,updated = slice_unset(self.db_engine, request)
-        if updated:
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE
-            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id})
-        return SliceId(**slice_id)
+        return slice_unset(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def RemoveSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty:
-        slice_id,deleted = slice_delete(self.db_engine, request)
-        if deleted:
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id})
-        return Empty()
+        return slice_delete(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SelectSlice(self, request : SliceFilter, context : grpc.ServicerContext) -> SliceList:
-        return SliceList(slices=slice_select(self.db_engine, request))
+        return slice_select(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetSliceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]:
-        for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT):
-            yield SliceEvent(**json.loads(message.content))
+        yield from consume_events(self.messagebroker, {EventTopicEnum.SLICE})
 
 
     # ----- Connection -------------------------------------------------------------------------------------------------
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListConnectionIds(self, request : ServiceId, context : grpc.ServicerContext) -> ConnectionIdList:
-        return ConnectionIdList(connection_ids=connection_list_ids(self.db_engine, request))
+        return connection_list_ids(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListConnections(self, request : ContextId, context : grpc.ServicerContext) -> ConnectionList:
-        return ConnectionList(connections=connection_list_objs(self.db_engine, request))
+        return connection_list_objs(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Connection:
-        return Connection(**connection_get(self.db_engine, request))
+        return connection_get(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetConnection(self, request : Connection, context : grpc.ServicerContext) -> ConnectionId:
-        connection_id,updated = connection_set(self.db_engine, request)
-        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-        notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': connection_id})
-        return ConnectionId(**connection_id)
+        return connection_set(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def RemoveConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Empty:
-        connection_id,deleted = connection_delete(self.db_engine, request)
-        if deleted:
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': connection_id})
-        return Empty()
+        return connection_delete(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetConnectionEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]:
-        for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT):
-            yield ConnectionEvent(**json.loads(message.content))
+        yield from consume_events(self.messagebroker, {EventTopicEnum.CONNECTION})
 
 
-    # ----- Policy -----------------------------------------------------------------------------------------------------
+    # ----- Policy Rule ------------------------------------------------------------------------------------------------
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListPolicyRuleIds(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleIdList:
-        return PolicyRuleIdList(policyRuleIdList=policyrule_list_ids(self.db_engine))
+        return policyrule_list_ids(self.db_engine)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def ListPolicyRules(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleList:
-        return PolicyRuleList(policyRules=policyrule_list_objs(self.db_engine))
+        return policyrule_list_objs(self.db_engine)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def GetPolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule:
-        return PolicyRule(**policyrule_get(self.db_engine, request))
+        return policyrule_get(self.db_engine, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetPolicyRule(self, request : PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId:
-        policyrule_id,updated = policyrule_set(self.db_engine, request)
-        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-        notify_event(self.messagebroker, TOPIC_POLICY, event_type, {'policyrule_id': policyrule_id})
-        return PolicyRuleId(**policyrule_id)
+        return policyrule_set(self.db_engine, self.messagebroker, request)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def RemovePolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> Empty:
-        policyrule_id,deleted = policyrule_delete(self.db_engine, request)
-        if deleted:
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_POLICY, event_type, {'policyrule_id': policyrule_id})
-        return Empty()
+        return policyrule_delete(self.db_engine, self.messagebroker, request)
diff --git a/src/context/service/Events.py b/src/context/service/Events.py
deleted file mode 100644
index 5d20f144c93385c769dbd8526cb10b8088eee728..0000000000000000000000000000000000000000
--- a/src/context/service/Events.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json, time
-from typing import Dict
-from common.message_broker.Message import Message
-from common.message_broker.MessageBroker import MessageBroker
-from common.proto.context_pb2 import EventTypeEnum
-
-TOPIC_CONNECTION = 'connection'
-TOPIC_CONTEXT    = 'context'
-TOPIC_DEVICE     = 'device'
-TOPIC_LINK       = 'link'
-TOPIC_POLICY     = 'policy'
-TOPIC_SERVICE    = 'service'
-TOPIC_SLICE      = 'slice'
-TOPIC_TOPOLOGY   = 'topology'
-
-TOPICS = {
-    TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY
-}
-
-CONSUME_TIMEOUT = 0.5 # seconds
-
-def notify_event(
-    messagebroker : MessageBroker, topic_name : str, event_type : EventTypeEnum, fields : Dict[str, str]
-) -> None:
-    event = {'event': {'timestamp': {'timestamp': time.time()}, 'event_type': event_type}}
-    for field_name, field_value in fields.items():
-        event[field_name] = field_value
-    messagebroker.publish(Message(topic_name, json.dumps(event)))
diff --git a/src/context/service/database/Component.py b/src/context/service/database/Component.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae873855b1c01c80c101eb4e97e3bf7688619fae
--- /dev/null
+++ b/src/context/service/database/Component.py
@@ -0,0 +1,69 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, json, logging
+from sqlalchemy import delete
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, selectinload, sessionmaker
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, List, Optional, Set
+from common.proto.context_pb2 import Component
+from common.proto.context_pb2 import ConfigRule
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from .models.ComponentModel import ComponentModel
+from .uuids._Builder import get_uuid_from_string
+from .uuids.EndPoint import endpoint_get_uuid
+
+LOGGER = logging.getLogger(__name__)
+
+def compose_components_data(
+    components : List[ConfigRule], now : datetime.datetime,
+    device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None
+) -> List[Dict]:
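+    # Only config rules whose resource_key contains '/inventory' describe components;
+    # the remaining config rules are skipped here and processed as regular config rules.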
+    dict_components : List[Dict] = list()
+    for position,component in enumerate(components):
+        str_kind = component.WhichOneof('config_rule')
+        message  = grpc_message_to_json_string(getattr(component, str_kind, {}))
+        data     = json.loads(message)
+        resource_key   = data["resource_key"]
+        resource_value = data["resource_value"]
+        if '/inventory' in resource_key:
+            resource_value_data = json.loads(resource_value)
+            name                = resource_value_data.pop('name', None)
+            type_               = resource_value_data.pop('class', None)
+            parent              = resource_value_data.pop('parent-component-references', None)
+            attributes          = resource_value_data.pop('attributes', {})
+            if len(resource_value_data) > 0:
+                LOGGER.warning('Discarding Component Leftovers: {:s}'.format(str(resource_value_data)))
+
+            attributes = {
+                attr_name:json.dumps(attr_value)
+                for attr_name,attr_value in attributes.items()
+            }
+            component_uuid = get_uuid_from_string(component.custom.resource_key, prefix_for_name=device_uuid)
+            dict_component = {
+                'component_uuid': component_uuid,
+                'device_uuid'   : device_uuid,
+                'name'          : name,
+                'type'          : type_,
+                'attributes'    : json.dumps(attributes),
+                'parent'        : parent,
+                'created_at'    : now,
+                'updated_at'    : now,
+            }
+            dict_components.append(dict_component)
+    return dict_components
diff --git a/src/context/service/database/Connection.py b/src/context/service/database/Connection.py
index 80d3b3a6d437986741ee5308205d8a902e897c40..0a659f662c5ca4116211b7939afc3f5fe39b835c 100644
--- a/src/context/service/database/Connection.py
+++ b/src/context/service/database/Connection.py
@@ -19,7 +19,9 @@ from sqlalchemy.exc import IntegrityError
 from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Tuple
-from common.proto.context_pb2 import Connection, ConnectionId, ServiceId
+from common.proto.context_pb2 import (
+    Connection, ConnectionId, ConnectionIdList, ConnectionList, Empty, EventTypeEnum, ServiceId)
+from common.message_broker.MessageBroker import MessageBroker
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Connection import json_connection_id
@@ -27,17 +29,19 @@ from .models.ConnectionModel import ConnectionEndPointModel, ConnectionModel, Co
 from .uuids.Connection import connection_get_uuid
 from .uuids.EndPoint import endpoint_get_uuid
 from .uuids.Service import service_get_uuid
+from .Events import notify_event_connection
 
 LOGGER = logging.getLogger(__name__)
 
-def connection_list_ids(db_engine : Engine, request : ServiceId) -> List[Dict]:
+def connection_list_ids(db_engine : Engine, request : ServiceId) -> ConnectionIdList:
     _,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
         obj_list : List[ConnectionModel] = session.query(ConnectionModel).filter_by(service_uuid=service_uuid).all()
         return [obj.dump_id() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    connection_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    return ConnectionIdList(connection_ids=connection_ids)
 
-def connection_list_objs(db_engine : Engine, request : ServiceId) -> List[Dict]:
+def connection_list_objs(db_engine : Engine, request : ServiceId) -> ConnectionList:
     _,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
         obj_list : List[ConnectionModel] = session.query(ConnectionModel)\
@@ -46,9 +50,10 @@ def connection_list_objs(db_engine : Engine, request : ServiceId) -> List[Dict]:
             .options(selectinload(ConnectionModel.connection_subservices))\
             .filter_by(service_uuid=service_uuid).all()
         return [obj.dump() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    connections = run_transaction(sessionmaker(bind=db_engine), callback)
+    return ConnectionList(connections=connections)
 
-def connection_get(db_engine : Engine, request : ConnectionId) -> Dict:
+def connection_get(db_engine : Engine, request : ConnectionId) -> Connection:
     connection_uuid = connection_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[ConnectionModel] = session.query(ConnectionModel)\
@@ -62,9 +67,9 @@ def connection_get(db_engine : Engine, request : ConnectionId) -> Dict:
         raise NotFoundException('Connection', request.connection_uuid.uuid, extra_details=[
             'connection_uuid generated was: {:s}'.format(connection_uuid),
         ])
-    return obj
+    return Connection(**obj)
 
-def connection_set(db_engine : Engine, request : Connection) -> Tuple[Dict, bool]:
+def connection_set(db_engine : Engine, messagebroker : MessageBroker, request : Connection) -> ConnectionId:
     connection_uuid = connection_get_uuid(request.connection_id, allow_random=True)
     _,service_uuid = service_get_uuid(request.service_id, allow_random=False)
     settings = grpc_message_to_json_string(request.settings),
@@ -143,12 +148,18 @@ def connection_set(db_engine : Engine, request : Connection) -> Tuple[Dict, bool
         return updated
 
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_connection_id(connection_uuid),updated
+    connection_id = json_connection_id(connection_uuid)
+    event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+    notify_event_connection(messagebroker, event_type, connection_id)
+    return ConnectionId(**connection_id)
 
-def connection_delete(db_engine : Engine, request : ConnectionId) -> Tuple[Dict, bool]:
+def connection_delete(db_engine : Engine, messagebroker : MessageBroker, request : ConnectionId) -> Empty:
     connection_uuid = connection_get_uuid(request, allow_random=False)
     def callback(session : Session) -> bool:
         num_deleted = session.query(ConnectionModel).filter_by(connection_uuid=connection_uuid).delete()
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_connection_id(connection_uuid),deleted
+    connection_id = json_connection_id(connection_uuid)
+    if deleted:
+        notify_event_connection(messagebroker, EventTypeEnum.EVENTTYPE_REMOVE, connection_id)
+    return Empty()
diff --git a/src/context/service/database/Context.py b/src/context/service/database/Context.py
index 4654095034749e1de985705b242ba9fa05a82f6a..403dcd2320d0ce925a95e92331a634785cfa2289 100644
--- a/src/context/service/database/Context.py
+++ b/src/context/service/database/Context.py
@@ -17,22 +17,25 @@ from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
 from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
-from typing import Dict, List, Optional, Tuple
-from common.proto.context_pb2 import Context, ContextId
+from typing import Dict, List, Optional
+from common.proto.context_pb2 import Context, ContextId, ContextIdList, ContextList, Empty, EventTypeEnum
+from common.message_broker.MessageBroker import MessageBroker
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from .models.ContextModel import ContextModel
 from .uuids.Context import context_get_uuid
+from .Events import notify_event_context
 
 LOGGER = logging.getLogger(__name__)
 
-def context_list_ids(db_engine : Engine) -> List[Dict]:
+def context_list_ids(db_engine : Engine) -> ContextIdList:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[ContextModel] = session.query(ContextModel).all()
         return [obj.dump_id() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    context_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    return ContextIdList(context_ids=context_ids)
 
-def context_list_objs(db_engine : Engine) -> List[Dict]:
+def context_list_objs(db_engine : Engine) -> ContextList:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[ContextModel] = session.query(ContextModel)\
             .options(selectinload(ContextModel.topologies))\
@@ -40,9 +43,10 @@ def context_list_objs(db_engine : Engine) -> List[Dict]:
             .options(selectinload(ContextModel.slices))\
             .all()
         return [obj.dump() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    contexts = run_transaction(sessionmaker(bind=db_engine), callback)
+    return ContextList(contexts=contexts)
 
-def context_get(db_engine : Engine, request : ContextId) -> Dict:
+def context_get(db_engine : Engine, request : ContextId) -> Context:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[ContextModel] = session.query(ContextModel)\
@@ -57,9 +61,9 @@ def context_get(db_engine : Engine, request : ContextId) -> Dict:
         raise NotFoundException('Context', raw_context_uuid, extra_details=[
             'context_uuid generated was: {:s}'.format(context_uuid)
         ])
-    return obj
+    return Context(**obj)
 
-def context_set(db_engine : Engine, request : Context) -> Tuple[Dict, bool]:
+def context_set(db_engine : Engine, messagebroker : MessageBroker, request : Context) -> ContextId:
     context_name = request.name
     if len(context_name) == 0: context_name = request.context_id.context_uuid.uuid
     context_uuid = context_get_uuid(request.context_id, context_name=context_name, allow_random=True)
@@ -100,12 +104,18 @@ def context_set(db_engine : Engine, request : Context) -> Tuple[Dict, bool]:
         return updated_at > created_at
 
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_context_id(context_uuid),updated
+    event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+    context_id = json_context_id(context_uuid)
+    notify_event_context(messagebroker, event_type, context_id)
+    return ContextId(**context_id)
 
-def context_delete(db_engine : Engine, request : ContextId) -> Tuple[Dict, bool]:
+def context_delete(db_engine : Engine, messagebroker : MessageBroker, request : ContextId) -> Empty:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> bool:
         num_deleted = session.query(ContextModel).filter_by(context_uuid=context_uuid).delete()
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_context_id(context_uuid),deleted
+    context_id = json_context_id(context_uuid)
+    if deleted:
+        notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_REMOVE, context_id)
+    return Empty()
diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py
index 8560399cc705729685cbaa7c10399a0ec7589015..3aff20ade14532dcb7fbf8ec1033c084aaeead3c 100644
--- a/src/context/service/database/Device.py
+++ b/src/context/service/database/Device.py
@@ -19,46 +19,53 @@ from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
-from common.proto.context_pb2 import Device, DeviceFilter, DeviceId, TopologyId
+from common.message_broker.MessageBroker import MessageBroker
+from common.proto.context_pb2 import (
+    Device, DeviceFilter, DeviceId, DeviceIdList, DeviceList, Empty, EventTypeEnum, TopologyId)
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Device import json_device_id
 from context.service.database.uuids.Topology import topology_get_uuid
 from .models.DeviceModel import DeviceModel
 from .models.EndPointModel import EndPointModel
-from .models.TopologyModel import TopologyDeviceModel
+from .models.ComponentModel import ComponentModel
+from .models.TopologyModel import TopologyDeviceModel, TopologyModel
 from .models.enums.DeviceDriver import grpc_to_enum__device_driver
 from .models.enums.DeviceOperationalStatus import grpc_to_enum__device_operational_status
 from .models.enums.KpiSampleType import grpc_to_enum__kpi_sample_type
 from .uuids.Device import device_get_uuid
 from .uuids.EndPoint import endpoint_get_uuid
 from .ConfigRule import compose_config_rules_data, upsert_config_rules
+from .Component import compose_components_data
+from .Events import notify_event_context, notify_event_device, notify_event_topology
 
 LOGGER = logging.getLogger(__name__)
 
-def device_list_ids(db_engine : Engine) -> List[Dict]:
+def device_list_ids(db_engine : Engine) -> DeviceIdList:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[DeviceModel] = session.query(DeviceModel).all()
         return [obj.dump_id() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    device_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    return DeviceIdList(device_ids=device_ids)
 
-def device_list_objs(db_engine : Engine) -> List[Dict]:
+def device_list_objs(db_engine : Engine) -> DeviceList:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[DeviceModel] = session.query(DeviceModel)\
             .options(selectinload(DeviceModel.endpoints))\
             .options(selectinload(DeviceModel.config_rules))\
+            .options(selectinload(DeviceModel.components))\
             .all()
-            #.options(selectinload(DeviceModel.components))\
         return [obj.dump() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    devices = run_transaction(sessionmaker(bind=db_engine), callback)
+    return DeviceList(devices=devices)
 
-def device_get(db_engine : Engine, request : DeviceId) -> Dict:
+def device_get(db_engine : Engine, request : DeviceId) -> Device:
     device_uuid = device_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[DeviceModel] = session.query(DeviceModel)\
             .options(selectinload(DeviceModel.endpoints))\
             .options(selectinload(DeviceModel.config_rules))\
+            .options(selectinload(DeviceModel.components))\
             .filter_by(device_uuid=device_uuid).one_or_none()
-            #.options(selectinload(DeviceModel.components))\
         return None if obj is None else obj.dump()
     obj = run_transaction(sessionmaker(bind=db_engine), callback)
     if obj is None:
@@ -66,9 +73,9 @@ def device_get(db_engine : Engine, request : DeviceId) -> Dict:
         raise NotFoundException('Device', raw_device_uuid, extra_details=[
             'device_uuid generated was: {:s}'.format(device_uuid)
         ])
-    return obj
+    return Device(**obj)
 
-def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]:
+def device_set(db_engine : Engine, messagebroker : MessageBroker, request : Device) -> DeviceId:
     raw_device_uuid = request.device_id.device_uuid.uuid
     raw_device_name = request.name
     device_name = raw_device_uuid if len(raw_device_name) == 0 else raw_device_name
@@ -133,7 +140,8 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]:
             })
             topology_uuids.add(endpoint_topology_uuid)
 
-    config_rules = compose_config_rules_data(request.device_config.config_rules, now, device_uuid=device_uuid)
+    components_data = compose_components_data(request.device_config.config_rules, now, device_uuid=device_uuid)
+    config_rules    = compose_config_rules_data(request.device_config.config_rules, now, device_uuid=device_uuid)
 
     device_data = [{
         'device_uuid'              : device_uuid,
@@ -148,7 +156,7 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]:
     if controller_uuid is not None:
         device_data[0]['controller_uuid'] = controller_uuid
 
-    def callback(session : Session) -> bool:
+    def callback(session : Session) -> Tuple[bool, List[Dict]]:
         stmt = insert(DeviceModel).values(device_data)
         stmt = stmt.on_conflict_do_update(
             index_elements=[DeviceModel.device_uuid],
@@ -180,29 +188,111 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]:
             endpoint_updates = session.execute(stmt).fetchall()
             updated_endpoints = any([(updated_at > created_at) for created_at,updated_at in endpoint_updates])
 
+        device_topology_ids = []
         if not updated or len(related_topologies) > 1:
             # Only update topology-device relations when device is created (not updated) or when endpoints are
             # modified (len(related_topologies) > 1).
-            session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing(
+            stmt = insert(TopologyDeviceModel).values(related_topologies)
+            stmt = stmt.on_conflict_do_nothing(
                 index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid]
-            ))
+            )
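+            # ON CONFLICT DO NOTHING + RETURNING yields only the rows actually
+            # inserted, i.e. the topologies newly related to this device.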
+            stmt = stmt.returning(TopologyDeviceModel.topology_uuid)
+            topology_uuids = session.execute(stmt).fetchall()
+
+            LOGGER.debug('RAW topology_uuids={:s}'.format(str(topology_uuids)))
+            if len(topology_uuids) > 0:
+                topology_uuids = [topology_uuid[0] for topology_uuid in topology_uuids]
+                LOGGER.debug('NEW topology_uuids={:s}'.format(str(topology_uuids)))
+                query = session.query(TopologyModel)
+                query = query.filter(TopologyModel.topology_uuid.in_(topology_uuids))
+                device_topologies : List[TopologyModel] = query.all()
+                device_topology_ids = [obj.dump_id() for obj in device_topologies]
+                LOGGER.debug('device_topology_ids={:s}'.format(str(device_topology_ids)))
 
+        updated_components = False
+
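+        # Upsert the components extracted from the config rules and track whether
+        # any pre-existing component row was actually modified.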
+        if len(components_data) > 0:
+            stmt = insert(ComponentModel).values(components_data)
+            stmt = stmt.on_conflict_do_update(
+                index_elements=[ComponentModel.component_uuid],
+                set_=dict(
+                    name             = stmt.excluded.name,
+                    type             = stmt.excluded.type,
+                    attributes       = stmt.excluded.attributes,
+                    parent           = stmt.excluded.parent,
+                    updated_at       = stmt.excluded.updated_at,
+                )
+            )
+            stmt = stmt.returning(ComponentModel.created_at, ComponentModel.updated_at)
+            component_updates = session.execute(stmt).fetchall()
+            updated_components = any([(updated_at > created_at) for created_at,updated_at in component_updates])
+
         changed_config_rules = upsert_config_rules(session, config_rules, device_uuid=device_uuid)
 
-        return updated or updated_endpoints or changed_config_rules
+        return updated or updated_endpoints or updated_components or changed_config_rules, device_topology_ids
 
-    updated = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_device_id(device_uuid),updated
+    updated, device_topology_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    device_id = json_device_id(device_uuid)
+    event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+    notify_event_device(messagebroker, event_type, device_id)
 
-def device_delete(db_engine : Engine, request : DeviceId) -> Tuple[Dict, bool]:
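+    # A device may belong to several topologies across contexts; de-duplicate the
+    # affected topology/context identifiers before emitting their UPDATE events.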
+    context_ids  : Dict[str, Dict] = dict()
+    topology_ids : Dict[str, Dict] = dict()
+    for topology_id in device_topology_ids:
+        topology_uuid = topology_id['topology_uuid']['uuid']
+        topology_ids[topology_uuid] = topology_id
+        context_id = topology_id['context_id']
+        context_uuid = context_id['context_uuid']['uuid']
+        context_ids[context_uuid] = context_id
+
+    for topology_id in topology_ids.values():
+        notify_event_topology(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, topology_id)
+
+    for context_id in context_ids.values():
+        notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+
+    return DeviceId(**device_id)
+
+def device_delete(db_engine : Engine, messagebroker : MessageBroker, request : DeviceId) -> Empty:
     device_uuid = device_get_uuid(request, allow_random=False)
-    def callback(session : Session) -> bool:
+    def callback(session : Session) -> Tuple[bool, List[Dict]]:
+        query = session.query(TopologyDeviceModel)
+        query = query.filter_by(device_uuid=device_uuid)
+        topology_device_list : List[TopologyDeviceModel] = query.all()
+        topology_ids = [obj.topology.dump_id() for obj in topology_device_list]
         num_deleted = session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete()
-        return num_deleted > 0
-    deleted = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_device_id(device_uuid),deleted
+        return num_deleted > 0, topology_ids
+    deleted, updated_topology_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    device_id = json_device_id(device_uuid)
+    if deleted:
+        notify_event_device(messagebroker, EventTypeEnum.EVENTTYPE_REMOVE, device_id)
+
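+        # Topologies (and contexts) that contained the removed device are
+        # de-duplicated and notified as updated.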
+        context_ids  : Dict[str, Dict] = dict()
+        topology_ids : Dict[str, Dict] = dict()
+        for topology_id in updated_topology_ids:
+            topology_uuid = topology_id['topology_uuid']['uuid']
+            topology_ids[topology_uuid] = topology_id
+            context_id = topology_id['context_id']
+            context_uuid = context_id['context_uuid']['uuid']
+            context_ids[context_uuid] = context_id
+
+        for topology_id in topology_ids.values():
+            notify_event_topology(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, topology_id)
+
+        for context_id in context_ids.values():
+            notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+
+    return Empty()
 
-def device_select(db_engine : Engine, request : DeviceFilter) -> List[Dict]:
+def device_select(db_engine : Engine, request : DeviceFilter) -> DeviceList:
     device_uuids = [
         device_get_uuid(device_id, allow_random=False)
         for device_id in request.device_ids.device_ids
@@ -219,4 +301,5 @@ def device_select(db_engine : Engine, request : DeviceFilter) -> List[Dict]:
         #if request.include_components  : query = query.options(selectinload(DeviceModel.components))
         obj_list : List[DeviceModel] = query.filter(DeviceModel.device_uuid.in_(device_uuids)).all()
         return [obj.dump(**dump_params) for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    devices = run_transaction(sessionmaker(bind=db_engine), callback)
+    return DeviceList(devices=devices)
diff --git a/src/context/service/database/EndPoint.py b/src/context/service/database/EndPoint.py
index b0df3bb8101a7b64a148e916178b1c9a77d511af..d7445b951dbd2d846900c21799e2fc03164ae6c5 100644
--- a/src/context/service/database/EndPoint.py
+++ b/src/context/service/database/EndPoint.py
@@ -17,13 +17,13 @@ from sqlalchemy.engine import Engine
 from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List
-from common.proto.context_pb2 import EndPointIdList
+from common.proto.context_pb2 import EndPointIdList, EndPointNameList
 from .models.EndPointModel import EndPointModel
 from .uuids.EndPoint import endpoint_get_uuid
 
 LOGGER = logging.getLogger(__name__)
 
-def endpoint_list_names(db_engine : Engine, request : EndPointIdList) -> List[Dict]:
+def endpoint_list_names(db_engine : Engine, request : EndPointIdList) -> EndPointNameList:
     endpoint_uuids = {
         endpoint_get_uuid(endpoint_id, allow_random=False)[-1]
         for endpoint_id in request.endpoint_ids
@@ -33,4 +33,5 @@ def endpoint_list_names(db_engine : Engine, request : EndPointIdList) -> List[Di
             .options(selectinload(EndPointModel.device))\
             .filter(EndPointModel.endpoint_uuid.in_(endpoint_uuids)).all()
         return [obj.dump_name() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    endpoint_names = run_transaction(sessionmaker(bind=db_engine), callback)
+    return EndPointNameList(endpoint_names=endpoint_names)
diff --git a/src/context/service/database/Events.py b/src/context/service/database/Events.py
new file mode 100644
index 0000000000000000000000000000000000000000..36774a5170ba20914555b0adc47a5c2faa592799
--- /dev/null
+++ b/src/context/service/database/Events.py
@@ -0,0 +1,89 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, json, logging, time
+from typing import Dict, Iterator, Set
+from common.message_broker.Message import Message
+from common.message_broker.MessageBroker import MessageBroker
+from common.proto.context_pb2 import (
+    ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent)
+
+class EventTopicEnum(enum.Enum):
+    CONNECTION  = 'connection'
+    CONTEXT     = 'context'
+    DEVICE      = 'device'
+    LINK        = 'link'
+    POLICY_RULE = 'policy-rule'
+    SERVICE     = 'service'
+    SLICE       = 'slice'
+    TOPOLOGY    = 'topology'
+
+TOPIC_TO_EVENTCLASS = {
+    EventTopicEnum.CONNECTION.value  : ConnectionEvent,
+    EventTopicEnum.CONTEXT.value     : ContextEvent,
+    EventTopicEnum.DEVICE.value      : DeviceEvent,
+    EventTopicEnum.LINK.value        : LinkEvent,
+    #EventTopicEnum.POLICY_RULE.value : PolicyRuleEvent,  # Not defined in proto files
+    EventTopicEnum.SERVICE.value     : ServiceEvent,
+    EventTopicEnum.SLICE.value       : SliceEvent,
+    EventTopicEnum.TOPOLOGY.value    : TopologyEvent,
+}
+
+CONSUME_TIMEOUT = 0.5 # seconds
+
+LOGGER = logging.getLogger(__name__)
+
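+# Each event is published as a JSON message: an 'event' envelope carrying the
+# timestamp and event type, plus a single identifier field (e.g. 'context_id').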
+def notify_event(
+    messagebroker : MessageBroker, topic_enum : EventTopicEnum, event_type : EventTypeEnum, fields : Dict[str, str]
+) -> None:
+    event = {'event': {'timestamp': {'timestamp': time.time()}, 'event_type': event_type}}
+    for field_name, field_value in fields.items():
+        event[field_name] = field_value
+    messagebroker.publish(Message(topic_enum.value, json.dumps(event)))
+
+def notify_event_context(messagebroker : MessageBroker, event_type : EventTypeEnum, context_id : Dict) -> None:
+    notify_event(messagebroker, EventTopicEnum.CONTEXT, event_type, {'context_id': context_id})
+
+def notify_event_topology(messagebroker : MessageBroker, event_type : EventTypeEnum, topology_id : Dict) -> None:
+    notify_event(messagebroker, EventTopicEnum.TOPOLOGY, event_type, {'topology_id': topology_id})
+
+def notify_event_device(messagebroker : MessageBroker, event_type : EventTypeEnum, device_id : Dict) -> None:
+    notify_event(messagebroker, EventTopicEnum.DEVICE, event_type, {'device_id': device_id})
+
+def notify_event_link(messagebroker : MessageBroker, event_type : EventTypeEnum, link_id : Dict) -> None:
+    notify_event(messagebroker, EventTopicEnum.LINK, event_type, {'link_id': link_id})
+
+def notify_event_service(messagebroker : MessageBroker, event_type : EventTypeEnum, service_id : Dict) -> None:
+    notify_event(messagebroker, EventTopicEnum.SERVICE, event_type, {'service_id': service_id})
+
+def notify_event_slice(messagebroker : MessageBroker, event_type : EventTypeEnum, slice_id : Dict) -> None:
+    notify_event(messagebroker, EventTopicEnum.SLICE, event_type, {'slice_id': slice_id})
+
+def notify_event_connection(messagebroker : MessageBroker, event_type : EventTypeEnum, connection_id : Dict) -> None:
+    notify_event(messagebroker, EventTopicEnum.CONNECTION, event_type, {'connection_id': connection_id})
+
+def notify_event_policy_rule(messagebroker : MessageBroker, event_type : EventTypeEnum, policyrule_id : Dict) -> None:
+    notify_event(messagebroker, EventTopicEnum.POLICY_RULE, event_type, {'policyrule_id': policyrule_id})
+
+def consume_events(
+    messagebroker : MessageBroker, topic_enums : Set[EventTopicEnum], consume_timeout : float = CONSUME_TIMEOUT
+) -> Iterator:
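+    # Map each consumed message to its typed Event class; unknown topics are skipped.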
+    topic_names = {topic_enum.value for topic_enum in topic_enums}
+    for message in messagebroker.consume(topic_names, consume_timeout=consume_timeout):
+        event_class = TOPIC_TO_EVENTCLASS.get(message.topic)
+        if event_class is None:
+            MSG = 'No EventClass defined for Topic({:s}). Ignoring...'
+            LOGGER.warning(MSG.format(str(message.topic)))
+            continue
+        yield event_class(**json.loads(message.content))
diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py
index 76db07a9e30b4f62c4b51574ad95c222a1490f79..67ac9f518f610caedc631444187cac10aded56c7 100644
--- a/src/context/service/database/Link.py
+++ b/src/context/service/database/Link.py
@@ -18,32 +18,36 @@ from sqlalchemy.engine import Engine
 from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
-from common.proto.context_pb2 import Link, LinkId, TopologyId
+from common.proto.context_pb2 import Empty, EventTypeEnum, Link, LinkId, LinkIdList, LinkList, TopologyId
+from common.message_broker.MessageBroker import MessageBroker
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.tools.object_factory.Link import json_link_id
 from context.service.database.uuids.Topology import topology_get_uuid
 from .models.LinkModel import LinkModel, LinkEndPointModel
-from .models.TopologyModel import TopologyLinkModel
+from .models.TopologyModel import TopologyLinkModel, TopologyModel
 from .uuids.EndPoint import endpoint_get_uuid
 from .uuids.Link import link_get_uuid
+from .Events import notify_event_context, notify_event_link, notify_event_topology
 
 LOGGER = logging.getLogger(__name__)
 
-def link_list_ids(db_engine : Engine) -> List[Dict]:
+def link_list_ids(db_engine : Engine) -> LinkIdList:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[LinkModel] = session.query(LinkModel).all()
         return [obj.dump_id() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    link_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    return LinkIdList(link_ids=link_ids)
 
-def link_list_objs(db_engine : Engine) -> List[Dict]:
+def link_list_objs(db_engine : Engine) -> LinkList:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[LinkModel] = session.query(LinkModel)\
             .options(selectinload(LinkModel.link_endpoints))\
             .all()
         return [obj.dump() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    links = run_transaction(sessionmaker(bind=db_engine), callback)
+    return LinkList(links=links)
 
-def link_get(db_engine : Engine, request : LinkId) -> Dict:
+def link_get(db_engine : Engine, request : LinkId) -> Link:
     link_uuid = link_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[LinkModel] = session.query(LinkModel)\
@@ -56,9 +60,9 @@ def link_get(db_engine : Engine, request : LinkId) -> Dict:
         raise NotFoundException('Link', raw_link_uuid, extra_details=[
             'link_uuid generated was: {:s}'.format(link_uuid)
         ])
-    return obj
+    return Link(**obj)
 
-def link_set(db_engine : Engine, request : Link) -> Tuple[Dict, bool]:
+def link_set(db_engine : Engine, messagebroker : MessageBroker, request : Link) -> LinkId:
     raw_link_uuid = request.link_id.link_uuid.uuid
     raw_link_name = request.name
     link_name = raw_link_uuid if len(raw_link_name) == 0 else raw_link_name
@@ -102,7 +106,7 @@ def link_set(db_engine : Engine, request : Link) -> Tuple[Dict, bool]:
         'updated_at': now,
     }]
 
-    def callback(session : Session) -> bool:
+    def callback(session : Session) -> Tuple[bool, List[Dict]]:
         stmt = insert(LinkModel).values(link_data)
         stmt = stmt.on_conflict_do_update(
             index_elements=[LinkModel.link_uuid],
@@ -115,28 +119,94 @@ def link_set(db_engine : Engine, request : Link) -> Tuple[Dict, bool]:
         created_at,updated_at = session.execute(stmt).fetchone()
         updated = updated_at > created_at
 
+        updated_endpoints = False
         if len(link_endpoints_data) > 0:
             # TODO: manage add/remove of endpoints; manage changes in relations with topology
             stmt = insert(LinkEndPointModel).values(link_endpoints_data)
             stmt = stmt.on_conflict_do_nothing(
                 index_elements=[LinkEndPointModel.link_uuid, LinkEndPointModel.endpoint_uuid]
             )
-            session.execute(stmt)
-
-        if len(related_topologies) > 0:
-            session.execute(insert(TopologyLinkModel).values(related_topologies).on_conflict_do_nothing(
+            link_endpoint_inserts = session.execute(stmt)
+            updated_endpoints = int(link_endpoint_inserts.rowcount) > 0
+
+        link_topology_ids = []
+        if not updated or len(related_topologies) > 1:
+            # Only update topology-link relations when link is created (not updated) or when endpoint_ids are
+            # modified (len(related_topologies) > 1).
+            stmt = insert(TopologyLinkModel).values(related_topologies)
+            stmt = stmt.on_conflict_do_nothing(
                 index_elements=[TopologyLinkModel.topology_uuid, TopologyLinkModel.link_uuid]
-            ))
-
-        return updated
-
-    updated = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_link_id(link_uuid),updated
-
-def link_delete(db_engine : Engine, request : LinkId) -> Tuple[Dict, bool]:
+            )
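+            # ON CONFLICT DO NOTHING + RETURNING yields only the rows actually
+            # inserted, i.e. the topologies newly related to this link.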
+            stmt = stmt.returning(TopologyLinkModel.topology_uuid)
+            topology_uuids = session.execute(stmt).fetchall()
+
+            LOGGER.debug('RAW topology_uuids={:s}'.format(str(topology_uuids)))
+            if len(topology_uuids) > 0:
+                topology_uuids = [topology_uuid[0] for topology_uuid in topology_uuids]
+                LOGGER.debug('NEW topology_uuids={:s}'.format(str(topology_uuids)))
+                query = session.query(TopologyModel)
+                query = query.filter(TopologyModel.topology_uuid.in_(topology_uuids))
+                link_topologies : List[TopologyModel] = query.all()
+                link_topology_ids = [obj.dump_id() for obj in link_topologies]
+                LOGGER.debug('link_topology_ids={:s}'.format(str(link_topology_ids)))
+
+        return updated or updated_endpoints, link_topology_ids
+
+    updated, link_topology_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    link_id = json_link_id(link_uuid)
+    event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+    notify_event_link(messagebroker, event_type, link_id)
+
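+    # De-duplicate the topologies/contexts touched by this link before notifying
+    # them as updated.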
+    context_ids  : Dict[str, Dict] = dict()
+    topology_ids : Dict[str, Dict] = dict()
+    for topology_id in link_topology_ids:
+        topology_uuid = topology_id['topology_uuid']['uuid']
+        topology_ids[topology_uuid] = topology_id
+        context_id = topology_id['context_id']
+        context_uuid = context_id['context_uuid']['uuid']
+        context_ids[context_uuid] = context_id
+
+    for topology_id in topology_ids.values():
+        notify_event_topology(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, topology_id)
+
+    for context_id in context_ids.values():
+        notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+
+    return LinkId(**link_id)
+
+def link_delete(db_engine : Engine, messagebroker : MessageBroker, request : LinkId) -> Empty:
     link_uuid = link_get_uuid(request, allow_random=False)
-    def callback(session : Session) -> bool:
+    def callback(session : Session) -> Tuple[bool, List[Dict]]:
+        query = session.query(TopologyLinkModel)
+        query = query.filter_by(link_uuid=link_uuid)
+        topology_link_list : List[TopologyLinkModel] = query.all()
+        topology_ids = [obj.topology.dump_id() for obj in topology_link_list]
         num_deleted = session.query(LinkModel).filter_by(link_uuid=link_uuid).delete()
-        return num_deleted > 0
-    deleted = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_link_id(link_uuid),deleted
+        return num_deleted > 0, topology_ids
+    deleted, updated_topology_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    link_id = json_link_id(link_uuid)
+    if deleted:
+        notify_event_link(messagebroker, EventTypeEnum.EVENTTYPE_REMOVE, link_id)
+
+        context_ids  : Dict[str, Dict] = dict()
+        topology_ids : Dict[str, Dict] = dict()
+        for topology_id in updated_topology_ids:
+            topology_uuid = topology_id['topology_uuid']['uuid']
+            topology_ids[topology_uuid] = topology_id
+            context_id = topology_id['context_id']
+            context_uuid = context_id['context_uuid']['uuid']
+            context_ids[context_uuid] = context_id
+
+        for topology_id in topology_ids.values():
+            notify_event_topology(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, topology_id)
+
+        for context_id in context_ids.values():
+            notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+
+    return Empty()
diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py
index 13f0a2698c17874e1e15f4d6a1d527d366141f56..3db0696a49cd851608d34797ce138ea0e63a1c51 100644
--- a/src/context/service/database/PolicyRule.py
+++ b/src/context/service/database/PolicyRule.py
@@ -12,13 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import datetime, json
+import datetime, json, logging
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
 from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
-from typing import Dict, List, Optional, Set, Tuple
+from typing import Dict, List, Optional, Set
+from common.proto.context_pb2 import Empty, EventTypeEnum
 from common.proto.policy_pb2 import PolicyRule, PolicyRuleId, PolicyRuleIdList, PolicyRuleList
+from common.message_broker.MessageBroker import MessageBroker
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.PolicyRule import json_policyrule_id
@@ -27,21 +29,26 @@ from .models.enums.PolicyRuleState import grpc_to_enum__policyrule_state
 from .models.PolicyRuleModel import PolicyRuleDeviceModel, PolicyRuleKindEnum, PolicyRuleModel
 from .uuids.PolicuRule import policyrule_get_uuid
 from .uuids.Service import service_get_uuid
+from .Events import notify_event_policy_rule
 
-def policyrule_list_ids(db_engine : Engine) -> List[Dict]:
+LOGGER = logging.getLogger(__name__)
+
+def policyrule_list_ids(db_engine : Engine) -> PolicyRuleIdList:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all()
         return [obj.dump_id() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    policy_rule_ids = run_transaction(sessionmaker(bind=db_engine), callback)
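+    # NOTE: policy.proto uses camelCase field names (policyRuleIdList,
+    # policyRules), unlike the snake_case fields of the context.proto messages.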
+    return PolicyRuleIdList(policyRuleIdList=policy_rule_ids)
 
-def policyrule_list_objs(db_engine : Engine) -> List[Dict]:
+def policyrule_list_objs(db_engine : Engine) -> PolicyRuleList:
     def callback(session : Session) -> List[Dict]:
         obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel)\
             .options(selectinload(PolicyRuleModel.policyrule_service))\
             .options(selectinload(PolicyRuleModel.policyrule_devices))\
             .all()
         return [obj.dump() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    policy_rules = run_transaction(sessionmaker(bind=db_engine), callback)
+    return PolicyRuleList(policyRules=policy_rules)
 
 def policyrule_get(db_engine : Engine, request : PolicyRuleId) -> PolicyRule:
     policyrule_uuid = policyrule_get_uuid(request, allow_random=False)
@@ -57,9 +64,9 @@ def policyrule_get(db_engine : Engine, request : PolicyRuleId) -> PolicyRule:
         raise NotFoundException('PolicyRule', raw_policyrule_uuid, extra_details=[
             'policyrule_uuid generated was: {:s}'.format(policyrule_uuid)
         ])
-    return obj
+    return PolicyRule(**obj)
 
-def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRuleId, bool]:
+def policyrule_set(db_engine : Engine, messagebroker : MessageBroker, request : PolicyRule) -> PolicyRuleId:
     policyrule_kind = request.WhichOneof('policy_rule')
     policyrule_spec = getattr(request, policyrule_kind)
     policyrule_basic = policyrule_spec.policyRuleBasic
@@ -130,12 +137,18 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule
         return updated
 
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_policyrule_id(policyrule_uuid),updated
+    policyrule_id = json_policyrule_id(policyrule_uuid)
+    event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+    notify_event_policy_rule(messagebroker, event_type, policyrule_id)
+    return PolicyRuleId(**policyrule_id)
 
-def policyrule_delete(db_engine : Engine, request : PolicyRuleId) -> Tuple[Dict, bool]:
+def policyrule_delete(db_engine : Engine, messagebroker : MessageBroker, request : PolicyRuleId) -> Empty:
     policyrule_uuid = policyrule_get_uuid(request, allow_random=False)
     def callback(session : Session) -> bool:
         num_deleted = session.query(PolicyRuleModel).filter_by(policyrule_uuid=policyrule_uuid).delete()
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_policyrule_id(policyrule_uuid),deleted
+    policyrule_id = json_policyrule_id(policyrule_uuid)
+    if deleted:
+        notify_event_policy_rule(messagebroker, EventTypeEnum.EVENTTYPE_REMOVE, policyrule_id)
+    return Empty()
diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py
index b6916dc3a19fef4bde3aff93300e63f360b362c0..fc196ddded291aa82c8f9df932c15611d13121e4 100644
--- a/src/context/service/database/Service.py
+++ b/src/context/service/database/Service.py
@@ -18,8 +18,10 @@ from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
 from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
-from typing import Dict, List, Optional, Set, Tuple
-from common.proto.context_pb2 import ContextId, Service, ServiceFilter, ServiceId
+from typing import Dict, List, Optional, Set
+from common.proto.context_pb2 import (
+    ContextId, Empty, EventTypeEnum, Service, ServiceFilter, ServiceId, ServiceIdList, ServiceList)
+from common.message_broker.MessageBroker import MessageBroker
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Service import json_service_id
@@ -31,17 +33,19 @@ from .models.ServiceModel import ServiceModel, ServiceEndPointModel
 from .uuids.Context import context_get_uuid
 from .uuids.EndPoint import endpoint_get_uuid
 from .uuids.Service import service_get_uuid
+from .Events import notify_event_context, notify_event_service
 
 LOGGER = logging.getLogger(__name__)
 
-def service_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
+def service_list_ids(db_engine : Engine, request : ContextId) -> ServiceIdList:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
         obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all()
         return [obj.dump_id() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    service_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    return ServiceIdList(service_ids=service_ids)
 
-def service_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
+def service_list_objs(db_engine : Engine, request : ContextId) -> ServiceList:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
         obj_list : List[ServiceModel] = session.query(ServiceModel)\
@@ -50,9 +54,10 @@ def service_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
             .options(selectinload(ServiceModel.config_rules))\
             .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    services = run_transaction(sessionmaker(bind=db_engine), callback)
+    return ServiceList(services=services)
 
-def service_get(db_engine : Engine, request : ServiceId) -> Dict:
+def service_get(db_engine : Engine, request : ServiceId) -> Service:
     _,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[ServiceModel] = session.query(ServiceModel)\
@@ -69,9 +74,9 @@ def service_get(db_engine : Engine, request : ServiceId) -> Dict:
             'context_uuid generated was: {:s}'.format(context_uuid),
             'service_uuid generated was: {:s}'.format(service_uuid),
         ])
-    return obj
+    return Service(**obj)
 
-def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]:
+def service_set(db_engine : Engine, messagebroker : MessageBroker, request : Service) -> ServiceId:
     raw_context_uuid = request.service_id.context_id.context_uuid.uuid
     raw_service_uuid = request.service_id.service_uuid.uuid
     raw_service_name = request.name
@@ -145,9 +150,14 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]:
         return updated or changed_constraints or changed_config_rules
 
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_service_id(service_uuid, json_context_id(context_uuid)),updated
-
-def service_unset(db_engine : Engine, request : Service) -> Tuple[Dict, bool]:
+    context_id = json_context_id(context_uuid)
+    service_id = json_service_id(service_uuid, context_id=context_id)
+    event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+    notify_event_service(messagebroker, event_type, service_id)
+    notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+    return ServiceId(**service_id)
+
+def service_unset(db_engine : Engine, messagebroker : MessageBroker, request : Service) -> ServiceId:
     raw_context_uuid = request.service_id.context_id.context_uuid.uuid
     raw_service_uuid = request.service_id.service_uuid.uuid
     raw_service_name = request.name
@@ -184,17 +194,25 @@ def service_unset(db_engine : Engine, request : Service) -> Tuple[Dict, bool]:
         return num_deletes > 0 or changed_constraints or changed_config_rules
 
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_service_id(service_uuid, json_context_id(context_uuid)),updated
+    service_id = json_service_id(service_uuid, json_context_id(context_uuid))
+    if updated:
+        notify_event_service(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, service_id)
+    return ServiceId(**service_id)
 
-def service_delete(db_engine : Engine, request : ServiceId) -> Tuple[Dict, bool]:
+def service_delete(db_engine : Engine, messagebroker : MessageBroker, request : ServiceId) -> Empty:
     context_uuid,service_uuid = service_get_uuid(request, allow_random=False)
     def callback(session : Session) -> bool:
         num_deleted = session.query(ServiceModel).filter_by(service_uuid=service_uuid).delete()
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_service_id(service_uuid, json_context_id(context_uuid)),deleted
-
-def service_select(db_engine : Engine, request : ServiceFilter) -> List[Dict]:
+    context_id = json_context_id(context_uuid)
+    service_id = json_service_id(service_uuid, context_id=context_id)
+    if deleted:
+        notify_event_service(messagebroker, EventTypeEnum.EVENTTYPE_REMOVE, service_id)
+        notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+    return Empty()
+
+def service_select(db_engine : Engine, request : ServiceFilter) -> ServiceList:
     service_uuids = [
         service_get_uuid(service_id, allow_random=False)[1]
         for service_id in request.service_ids.service_ids
@@ -211,4 +229,5 @@ def service_select(db_engine : Engine, request : ServiceFilter) -> List[Dict]:
         if request.include_config_rules: query = query.options(selectinload(ServiceModel.config_rules))
         obj_list : List[ServiceModel] = query.filter(ServiceModel.service_uuid.in_(service_uuids)).all()
         return [obj.dump(**dump_params) for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    services = run_transaction(sessionmaker(bind=db_engine), callback)
+    return ServiceList(services=services)
diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py
index abd140024f2a13289c7af6a3bafe363a8247e053..98a5ef7a8dd5d6f489c11bc2798ea16fc5b9c128 100644
--- a/src/context/service/database/Slice.py
+++ b/src/context/service/database/Slice.py
@@ -18,8 +18,10 @@ from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
 from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
-from typing import Dict, List, Optional, Set, Tuple
-from common.proto.context_pb2 import ContextId, Slice, SliceFilter, SliceId
+from typing import Dict, List, Optional, Set
+from common.proto.context_pb2 import (
+    ContextId, Empty, EventTypeEnum, Slice, SliceFilter, SliceId, SliceIdList, SliceList)
+from common.message_broker.MessageBroker import MessageBroker
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Slice import json_slice_id
@@ -31,17 +33,19 @@ from .uuids.Context import context_get_uuid
 from .uuids.EndPoint import endpoint_get_uuid
 from .uuids.Service import service_get_uuid
 from .uuids.Slice import slice_get_uuid
+from .Events import notify_event_context, notify_event_slice
 
 LOGGER = logging.getLogger(__name__)
 
-def slice_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
+def slice_list_ids(db_engine : Engine, request : ContextId) -> SliceIdList:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
         obj_list : List[SliceModel] = session.query(SliceModel).filter_by(context_uuid=context_uuid).all()
         return [obj.dump_id() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    slice_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    return SliceIdList(slice_ids=slice_ids)
 
-def slice_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
+def slice_list_objs(db_engine : Engine, request : ContextId) -> SliceList:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
         obj_list : List[SliceModel] = session.query(SliceModel)\
@@ -52,9 +56,10 @@ def slice_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
             .options(selectinload(SliceModel.config_rules))\
             .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    slices = run_transaction(sessionmaker(bind=db_engine), callback)
+    return SliceList(slices=slices)
 
-def slice_get(db_engine : Engine, request : SliceId) -> Dict:
+def slice_get(db_engine : Engine, request : SliceId) -> Slice:
     _,slice_uuid = slice_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[SliceModel] = session.query(SliceModel)\
@@ -73,9 +78,9 @@ def slice_get(db_engine : Engine, request : SliceId) -> Dict:
             'context_uuid generated was: {:s}'.format(context_uuid),
             'slice_uuid generated was: {:s}'.format(slice_uuid),
         ])
-    return obj
+    return Slice(**obj)
 
-def slice_set(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
+def slice_set(db_engine : Engine, messagebroker : MessageBroker, request : Slice) -> SliceId:
     raw_context_uuid = request.slice_id.context_id.context_uuid.uuid
     raw_slice_uuid = request.slice_id.slice_uuid.uuid
     raw_slice_name = request.name
@@ -182,9 +187,14 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
         return updated or changed_constraints or changed_config_rules
 
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_slice_id(slice_uuid, json_context_id(context_uuid)),updated
-
-def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
+    context_id = json_context_id(context_uuid)
+    slice_id = json_slice_id(slice_uuid, context_id=context_id)
+    event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+    notify_event_slice(messagebroker, event_type, slice_id)
+    notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+    return SliceId(**slice_id)
+
+def slice_unset(db_engine : Engine, messagebroker : MessageBroker, request : Slice) -> SliceId:
     raw_context_uuid = request.slice_id.context_id.context_uuid.uuid
     raw_slice_uuid = request.slice_id.slice_uuid.uuid
     raw_slice_name = request.name
@@ -243,17 +253,25 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
         return num_deletes > 0 or changed_constraints or changed_config_rules
 
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_slice_id(slice_uuid, json_context_id(context_uuid)),updated
+    slice_id = json_slice_id(slice_uuid, json_context_id(context_uuid))
+    if updated:
+        notify_event_slice(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, slice_id)
+    return SliceId(**slice_id)
 
-def slice_delete(db_engine : Engine, request : SliceId) -> Tuple[Dict, bool]:
+def slice_delete(db_engine : Engine, messagebroker : MessageBroker, request : SliceId) -> Empty:
     context_uuid,slice_uuid = slice_get_uuid(request, allow_random=False)
     def callback(session : Session) -> bool:
         num_deleted = session.query(SliceModel).filter_by(slice_uuid=slice_uuid).delete()
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_slice_id(slice_uuid, json_context_id(context_uuid)),deleted
-
-def slice_select(db_engine : Engine, request : SliceFilter) -> List[Dict]:
+    context_id = json_context_id(context_uuid)
+    slice_id = json_slice_id(slice_uuid, context_id=context_id)
+    if deleted:
+        notify_event_slice(messagebroker, EventTypeEnum.EVENTTYPE_REMOVE, slice_id)
+        notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+    return Empty()
+
+def slice_select(db_engine : Engine, request : SliceFilter) -> SliceList:
     slice_uuids = [
         slice_get_uuid(slice_id, allow_random=False)[1]
         for slice_id in request.slice_ids.slice_ids
@@ -274,4 +292,5 @@ def slice_select(db_engine : Engine, request : SliceFilter) -> List[Dict]:
         if request.include_config_rules: query = query.options(selectinload(SliceModel.config_rules))
         obj_list : List[SliceModel] = query.filter(SliceModel.slice_uuid.in_(slice_uuids)).all()
         return [obj.dump(**dump_params) for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    slices = run_transaction(sessionmaker(bind=db_engine), callback)
+    return SliceList(slices=slices)
diff --git a/src/context/service/database/Topology.py b/src/context/service/database/Topology.py
index 4440299b63f68613854e79998270872389d385cb..1f0fb6c0b3c400d58ea83bc857e97bc50a1324a3 100644
--- a/src/context/service/database/Topology.py
+++ b/src/context/service/database/Topology.py
@@ -17,8 +17,10 @@ from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.engine import Engine
 from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
-from typing import Dict, List, Optional, Tuple
-from common.proto.context_pb2 import ContextId, Topology, TopologyId
+from typing import Dict, List, Optional
+from common.proto.context_pb2 import (
+    ContextId, Empty, EventTypeEnum, Topology, TopologyDetails, TopologyId, TopologyIdList, TopologyList)
+from common.message_broker.MessageBroker import MessageBroker
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology_id
@@ -27,17 +29,19 @@ from .models.LinkModel import LinkModel
 from .models.TopologyModel import TopologyDeviceModel, TopologyLinkModel, TopologyModel
 from .uuids.Context import context_get_uuid
 from .uuids.Topology import topology_get_uuid
+from .Events import notify_event_context, notify_event_topology
 
 LOGGER = logging.getLogger(__name__)
 
-def topology_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
+def topology_list_ids(db_engine : Engine, request : ContextId) -> TopologyIdList:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
         obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all()
         return [obj.dump_id() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    topology_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    return TopologyIdList(topology_ids=topology_ids)
 
-def topology_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
+def topology_list_objs(db_engine : Engine, request : ContextId) -> TopologyList:
     context_uuid = context_get_uuid(request, allow_random=False)
     def callback(session : Session) -> List[Dict]:
         obj_list : List[TopologyModel] = session.query(TopologyModel)\
@@ -45,9 +49,10 @@ def topology_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
             .options(selectinload(TopologyModel.topology_links))\
             .filter_by(context_uuid=context_uuid).all()
         return [obj.dump() for obj in obj_list]
-    return run_transaction(sessionmaker(bind=db_engine), callback)
+    topologies = run_transaction(sessionmaker(bind=db_engine), callback)
+    return TopologyList(topologies=topologies)
 
-def topology_get(db_engine : Engine, request : TopologyId) -> Dict:
+def topology_get(db_engine : Engine, request : TopologyId) -> Topology:
     _,topology_uuid = topology_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[TopologyModel] = session.query(TopologyModel)\
@@ -63,9 +68,9 @@ def topology_get(db_engine : Engine, request : TopologyId) -> Dict:
             'context_uuid generated was: {:s}'.format(context_uuid),
             'topology_uuid generated was: {:s}'.format(topology_uuid),
         ])
-    return obj
+    return Topology(**obj)
 
-def topology_get_details(db_engine : Engine, request : TopologyId) -> Dict:
+def topology_get_details(db_engine : Engine, request : TopologyId) -> TopologyDetails:
     _,topology_uuid = topology_get_uuid(request, allow_random=False)
     def callback(session : Session) -> Optional[Dict]:
         obj : Optional[TopologyModel] = session.query(TopologyModel)\
@@ -82,9 +87,9 @@ def topology_get_details(db_engine : Engine, request : TopologyId) -> Dict:
             'context_uuid generated was: {:s}'.format(context_uuid),
             'topology_uuid generated was: {:s}'.format(topology_uuid),
         ])
-    return obj
+    return TopologyDetails(**obj)
 
-def topology_set(db_engine : Engine, request : Topology) -> Tuple[Dict, bool]:
+def topology_set(db_engine : Engine, messagebroker : MessageBroker, request : Topology) -> TopologyId:
     topology_name = request.name
     if len(topology_name) == 0: topology_name = request.topology_id.topology_uuid.uuid
     context_uuid,topology_uuid = topology_get_uuid(request.topology_id, topology_name=topology_name, allow_random=True)
@@ -120,14 +125,24 @@ def topology_set(db_engine : Engine, request : Topology) -> Tuple[Dict, bool]:
         stmt = stmt.returning(TopologyModel.created_at, TopologyModel.updated_at)
         created_at,updated_at = session.execute(stmt).fetchone()
         return updated_at > created_at
-    
+
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)),updated
+    context_id = json_context_id(context_uuid)
+    topology_id = json_topology_id(topology_uuid, context_id=context_id)
+    event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+    notify_event_topology(messagebroker, event_type, topology_id)
+    notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+    return TopologyId(**topology_id)
 
-def topology_delete(db_engine : Engine, request : TopologyId) -> Tuple[Dict, bool]:
+def topology_delete(db_engine : Engine, messagebroker : MessageBroker, request : TopologyId) -> Empty:
     context_uuid,topology_uuid = topology_get_uuid(request, allow_random=False)
     def callback(session : Session) -> bool:
         num_deleted = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid).delete()
         return num_deleted > 0
     deleted = run_transaction(sessionmaker(bind=db_engine), callback)
-    return json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)),deleted
+    context_id = json_context_id(context_uuid)
+    topology_id = json_topology_id(topology_uuid, context_id=context_id)
+    if deleted:
+        notify_event_topology(messagebroker, EventTypeEnum.EVENTTYPE_REMOVE, topology_id)
+        notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+    return Empty()
diff --git a/src/context/service/database/models/ComponentModel.py b/src/context/service/database/models/ComponentModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9acfaeabe95c55fc464732d54459d99f9b5b054
--- /dev/null
+++ b/src/context/service/database/models/ComponentModel.py
@@ -0,0 +1,55 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from sqlalchemy import Column, DateTime, ForeignKey, String
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.orm import relationship
+from typing import Dict
+from ._Base import _Base
+
+class ComponentModel(_Base):
+    __tablename__ = 'device_component'
+
+    component_uuid  = Column(UUID(as_uuid=False), primary_key=True)
+    device_uuid     = Column(ForeignKey('device.device_uuid',ondelete='CASCADE' ), nullable=False, index=True)
+    name            = Column(String, nullable=False)
+    type            = Column(String, nullable=False)
+    attributes      = Column(String, nullable=False)
+    parent          = Column(String, nullable=False)
+    created_at      = Column(DateTime, nullable=False)
+    updated_at      = Column(DateTime, nullable=False)
+
+    device = relationship('DeviceModel', back_populates='components')
+
+    def dump_id(self) -> Dict:
+        return {
+            'device_id'     : self.device.dump_id(),
+            'component_uuid': {'uuid': self.component_uuid},
+        }
+
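+    # NOTE: 'attributes' is persisted as a JSON-encoded string; dump() decodes
+    # it back into a dictionary with json.loads.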
+    def dump(self) -> Dict:
+        data = dict()
+        data['attributes']     = json.loads(self.attributes)
+        data['component_uuid'] = {'uuid': self.component_uuid}
+        data['name']           = self.name
+        data['type']           = self.type
+        data['parent']         = self.parent
+        return data
+
+    def dump_name(self) -> Dict:
+        return {
+            'component_id'  : self.dump_id(),
+            'device_name'   : self.device.device_name,
+            'component_name': self.name,
+        }
diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py
index 1097d0b9ab47a86c47ce2ad8394d067ae9f9953e..376dc98c4053f68c511a8c717117d58d9eda1cca 100644
--- a/src/context/service/database/models/DeviceModel.py
+++ b/src/context/service/database/models/DeviceModel.py
@@ -36,6 +36,7 @@ class DeviceModel(_Base):
     #topology_devices = relationship('TopologyDeviceModel', back_populates='device')
     config_rules = relationship('DeviceConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='device'
     endpoints    = relationship('EndPointModel', passive_deletes=True) # lazy='joined', back_populates='device'
+    components   = relationship('ComponentModel', passive_deletes=True) # lazy='joined', back_populates='device'
     controller   = relationship('DeviceModel', remote_side=[device_uuid], passive_deletes=True) # lazy='joined', back_populates='device'
 
     def dump_id(self) -> Dict:
@@ -55,7 +56,7 @@ class DeviceModel(_Base):
         ]}
 
     def dump_components(self) -> List[Dict]:
-        return []
+        return [component.dump() for component in self.components]
 
     def dump(self,
         include_endpoints : bool = True, include_config_rules : bool = True, include_components : bool = True,
@@ -70,5 +71,5 @@ class DeviceModel(_Base):
         }
         if include_endpoints: result['device_endpoints'] = self.dump_endpoints()
         if include_config_rules: result['device_config'] = self.dump_config_rules()
-        if include_components: result['component'] = self.dump_components()
+        if include_components: result['components'] = self.dump_components()
         return result
diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py
index 0ed4a038bcf4426f4cf112bd03c5cb36cb42c822..68d97edf36ee42c04948cc6782b86bce028cb76a 100644
--- a/src/context/service/database/models/TopologyModel.py
+++ b/src/context/service/database/models/TopologyModel.py
@@ -67,7 +67,7 @@ class TopologyDeviceModel(_Base):
     topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True, index=True)
 
-    #topology = relationship('TopologyModel', lazy='selectin') # back_populates='topology_devices'
+    topology = relationship('TopologyModel', lazy='selectin', viewonly=True) # back_populates='topology_devices'
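+    # viewonly=True: the relationship is read-only (e.g., Link.py dumps
+    # obj.topology.dump_id() when building delete notifications); writes keep
+    # going through the topology_uuid foreign-key column.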
     device   = relationship('DeviceModel',   lazy='selectin') # back_populates='topology_devices'
 
 class TopologyLinkModel(_Base):
@@ -76,5 +76,5 @@ class TopologyLinkModel(_Base):
     topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True, index=True)
 
-    #topology = relationship('TopologyModel', lazy='selectin') # back_populates='topology_links'
+    topology = relationship('TopologyModel', lazy='selectin', viewonly=True) # back_populates='topology_links'
     link     = relationship('LinkModel',     lazy='selectin') # back_populates='topology_links'
diff --git a/src/context/service/database/models/_Base.py b/src/context/service/database/models/_Base.py
index b87b9b06d6adc5825ab5dd84cf64347eb9c26f66..52eb6b088210b8edc8d121221458ae11ece484a5 100644
--- a/src/context/service/database/models/_Base.py
+++ b/src/context/service/database/models/_Base.py
@@ -60,6 +60,9 @@ def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None:
         index_storing('topology_context_uuid_rec_idx', 'topology', ['context_uuid'], [
             'topology_name', 'created_at', 'updated_at'
         ]),
+        index_storing('device_component_idx', 'device_component', ['device_uuid'], [
+            'name', 'type', 'attributes', 'created_at', 'updated_at'
+        ]),
     ]
     def callback(session : Session) -> bool:
         for stmt in statements: session.execute(stmt)
diff --git a/src/context/service/database/models/enums/ConfigAction.py b/src/context/service/database/models/enums/ConfigAction.py
index 5d7aa6b44ecb58f769a1c70a5cabdda98ba51bca..526024a1fedf3331ca45e3cb517ff9d58ce28c25 100644
--- a/src/context/service/database/models/enums/ConfigAction.py
+++ b/src/context/service/database/models/enums/ConfigAction.py
@@ -16,6 +16,11 @@ import enum, functools
 from common.proto.context_pb2 import ConfigActionEnum
 from ._GrpcToEnum import grpc_to_enum
 
+# IMPORTANT: Entries of enum class ORM_ConfigActionEnum should be named
+#            as in the proto files removing the prefixes. For example,
+#            proto item ConfigActionEnum.CONFIGACTION_SET should be
+#            included as SET. If item name does not match, automatic
+#            mapping of proto enums to database enums will fail.
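+#
+#            A minimal sketch of the mapping this enables (assuming the
+#            functools.partial binding defined at the end of this module):
+#                grpc_to_enum__config_action = functools.partial(
+#                    grpc_to_enum, ConfigActionEnum, ORM_ConfigActionEnum)
+#                grpc_to_enum__config_action(ConfigActionEnum.CONFIGACTION_SET)
+#                # -> ORM_ConfigActionEnum.SET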
 class ORM_ConfigActionEnum(enum.Enum):
     UNDEFINED = ConfigActionEnum.CONFIGACTION_UNDEFINED
     SET       = ConfigActionEnum.CONFIGACTION_SET
diff --git a/src/context/service/database/models/enums/ConstraintAction.py b/src/context/service/database/models/enums/ConstraintAction.py
index 65533b6f579ffe153b046dfcb39d37758f4c0577..2a53b8e1c5e97e48cbda9648c8473313bce6f4cd 100644
--- a/src/context/service/database/models/enums/ConstraintAction.py
+++ b/src/context/service/database/models/enums/ConstraintAction.py
@@ -16,6 +16,11 @@ import enum, functools
 from common.proto.context_pb2 import ConstraintActionEnum
 from ._GrpcToEnum import grpc_to_enum
 
+# IMPORTANT: Entries of enum class ORM_ConstraintActionEnum should be named
+#            as in the proto files removing the prefixes. For example, proto
+#            item ConstraintActionEnum.CONSTRAINTACTION_SET should be included
+#            as SET. If item name does not match, automatic mapping of proto
+#            enums to database enums will fail.
 class ORM_ConstraintActionEnum(enum.Enum):
     UNDEFINED = ConstraintActionEnum.CONSTRAINTACTION_UNDEFINED
     SET       = ConstraintActionEnum.CONSTRAINTACTION_SET
diff --git a/src/context/service/database/models/enums/DeviceDriver.py b/src/context/service/database/models/enums/DeviceDriver.py
index 09be94b1d7ec041f2d3f50f832f15017fb62e63c..66635decc5369c8b7601863da85f497626d70ac8 100644
--- a/src/context/service/database/models/enums/DeviceDriver.py
+++ b/src/context/service/database/models/enums/DeviceDriver.py
@@ -16,13 +16,18 @@ import enum, functools
 from common.proto.context_pb2 import DeviceDriverEnum
 from ._GrpcToEnum import grpc_to_enum
 
+# IMPORTANT: Entries of enum class ORM_DeviceDriverEnum should be named as in
+#            the proto files removing the prefixes. For example, proto item
+#            DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG should be included as
+#            OPENCONFIG. If item name does not match, automatic mapping of
+#            proto enums to database enums will fail.
 class ORM_DeviceDriverEnum(enum.Enum):
     UNDEFINED             = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED
     OPENCONFIG            = DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG
     TRANSPORT_API         = DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API
     P4                    = DeviceDriverEnum.DEVICEDRIVER_P4
     IETF_NETWORK_TOPOLOGY = DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY
-    ONF_TR_352            = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352
+    ONF_TR_532            = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532
     XR                    = DeviceDriverEnum.DEVICEDRIVER_XR
     IETF_L2VPN            = DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN
     GNMI_OPENCONFIG       = DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG
diff --git a/src/context/service/database/models/enums/DeviceOperationalStatus.py b/src/context/service/database/models/enums/DeviceOperationalStatus.py
index a121fab86408493bf0b211f4fcc8423aafa969cf..9e98869dce025e51c20ff0aca0dcd78a9ab57fe1 100644
--- a/src/context/service/database/models/enums/DeviceOperationalStatus.py
+++ b/src/context/service/database/models/enums/DeviceOperationalStatus.py
@@ -16,6 +16,11 @@ import enum, functools
 from common.proto.context_pb2 import DeviceOperationalStatusEnum
 from ._GrpcToEnum import grpc_to_enum
 
+# IMPORTANT: Entries of enum class ORM_DeviceOperationalStatusEnum should be
+#            named as in the proto files removing the prefixes. For example,
+#            proto item DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+#            should be declared as ENABLED. If item name does not match, automatic
+#            mapping of proto enums to database enums will fail.
 class ORM_DeviceOperationalStatusEnum(enum.Enum):
     UNDEFINED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED
     DISABLED  = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
diff --git a/src/context/service/database/models/enums/KpiSampleType.py b/src/context/service/database/models/enums/KpiSampleType.py
index 3bf5d06f047016af7167d0d59a315ac465abfd19..5cef9ac199a0cc3389092e4ea375940e27554066 100644
--- a/src/context/service/database/models/enums/KpiSampleType.py
+++ b/src/context/service/database/models/enums/KpiSampleType.py
@@ -16,6 +16,11 @@ import enum, functools
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from ._GrpcToEnum import grpc_to_enum
 
+# IMPORTANT: Entries of enum class ORM_KpiSampleTypeEnum should be named as in
+#            the proto files removing the prefixes. For example, proto item
+#            KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED should be declared as
+#            BYTES_RECEIVED. If item name does not match, automatic mapping of
+#            proto enums to database enums will fail.
 class ORM_KpiSampleTypeEnum(enum.Enum):
     UNKNOWN             = KpiSampleType.KPISAMPLETYPE_UNKNOWN
     PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED
diff --git a/src/context/service/database/models/enums/PolicyRuleState.py b/src/context/service/database/models/enums/PolicyRuleState.py
index c4aa950a11605682f1c78e544767ea7e0a7b24b4..e16ee01e36b5e2ec2ad8e07f7cb6201ee091e76f 100644
--- a/src/context/service/database/models/enums/PolicyRuleState.py
+++ b/src/context/service/database/models/enums/PolicyRuleState.py
@@ -16,6 +16,13 @@ import enum, functools
 from common.proto.policy_pb2 import PolicyRuleStateEnum
 from ._GrpcToEnum import grpc_to_enum
 
+# IMPORTANT: Entries of enum class ORM_PolicyRuleStateEnum should be named as in
+#            the proto files removing the prefixes. For example, proto item
+#            PolicyRuleStateEnum.POLICY_INSERTED should be declared as INSERTED.
+#            In this case, since the entries in the proto enum have a different prefix
+#            than that specified in class ORM_PolicyRuleStateEnum, we force the prefix
+#            using argument grpc_enum_prefix. If item name does not match, automatic
+#            mapping of proto enums to database enums will fail.
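+#
+#            A minimal sketch (assuming the partial binding at the end of this
+#            module):
+#                grpc_to_enum__policyrule_state = functools.partial(
+#                    grpc_to_enum, PolicyRuleStateEnum, ORM_PolicyRuleStateEnum,
+#                    grpc_enum_prefix='POLICY_')
+#                grpc_to_enum__policyrule_state(PolicyRuleStateEnum.POLICY_INSERTED)
+#                # -> ORM_PolicyRuleStateEnum.INSERTED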
 class ORM_PolicyRuleStateEnum(enum.Enum):
     UNDEFINED   = PolicyRuleStateEnum.POLICY_UNDEFINED   # Undefined rule state
     FAILED      = PolicyRuleStateEnum.POLICY_FAILED      # Rule failed
diff --git a/src/context/service/database/models/enums/ServiceStatus.py b/src/context/service/database/models/enums/ServiceStatus.py
index cd2a183b825eff54a51a844ea6834263bbabbc31..ae0ad55bcfe327b981881bcd304124c9bb6576ab 100644
--- a/src/context/service/database/models/enums/ServiceStatus.py
+++ b/src/context/service/database/models/enums/ServiceStatus.py
@@ -16,6 +16,11 @@ import enum, functools
 from common.proto.context_pb2 import ServiceStatusEnum
 from ._GrpcToEnum import grpc_to_enum
 
+# IMPORTANT: Entries of enum class ORM_ServiceStatusEnum should be named as in
+#            the proto files removing the prefixes. For example, proto item
+#            ServiceStatusEnum.SERVICESTATUS_PLANNED should be declared as PLANNED.
+#            If item name does not match, automatic mapping of proto enums to
+#            database enums will fail.
 class ORM_ServiceStatusEnum(enum.Enum):
     UNDEFINED       = ServiceStatusEnum.SERVICESTATUS_UNDEFINED
     PLANNED         = ServiceStatusEnum.SERVICESTATUS_PLANNED
diff --git a/src/context/service/database/models/enums/ServiceType.py b/src/context/service/database/models/enums/ServiceType.py
index 3937eaa114429ce9d004933a5d5baf1ae6137513..0ed1938a7ca1e566bea815d9ce936150bb91d9dc 100644
--- a/src/context/service/database/models/enums/ServiceType.py
+++ b/src/context/service/database/models/enums/ServiceType.py
@@ -16,6 +16,11 @@ import enum, functools
 from common.proto.context_pb2 import ServiceTypeEnum
 from ._GrpcToEnum import grpc_to_enum
 
+# IMPORTANT: Entries of enum class ORM_ServiceTypeEnum should be named as in
+#            the proto files removing the prefixes. For example, proto item
+#            ServiceTypeEnum.SERVICETYPE_L3NM should be declared as L3NM.
+#            If item name does not match, automatic mapping of proto enums to
+#            database enums will fail.
 class ORM_ServiceTypeEnum(enum.Enum):
     UNKNOWN                   = ServiceTypeEnum.SERVICETYPE_UNKNOWN
     L3NM                      = ServiceTypeEnum.SERVICETYPE_L3NM
diff --git a/src/context/service/database/models/enums/SliceStatus.py b/src/context/service/database/models/enums/SliceStatus.py
index 5d77578b4cb4ee155981ede4395b1cafc3be4ef7..32ef0cc4a3d65aa96346b038534ca65bc3e4f95c 100644
--- a/src/context/service/database/models/enums/SliceStatus.py
+++ b/src/context/service/database/models/enums/SliceStatus.py
@@ -16,6 +16,11 @@ import enum, functools
 from common.proto.context_pb2 import SliceStatusEnum
 from ._GrpcToEnum import grpc_to_enum
 
+# IMPORTANT: Entries of enum class ORM_SliceStatusEnum should be named as in
+#            the proto file but removing the prefixes. For example, proto item
+#            SliceStatusEnum.SLICESTATUS_PLANNED should be declared as PLANNED.
+#            If item name does not match, automatic mapping of proto enums to
+#            database enums will fail.
 class ORM_SliceStatusEnum(enum.Enum):
     UNDEFINED = SliceStatusEnum.SLICESTATUS_UNDEFINED
     PLANNED   = SliceStatusEnum.SLICESTATUS_PLANNED
diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py
index 4ae7128b0258536dae5fbed2ae86457d4f9f969f..0d85e8ff9668c5715dfc9d830027a5ae1faed9b5 100644
--- a/src/device/service/drivers/__init__.py
+++ b/src/device/service/drivers/__init__.py
@@ -69,7 +69,7 @@ DRIVERS.append(
         #        DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API,
         #        DeviceDriverEnum.DEVICEDRIVER_P4,
         #        DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY,
-        #        DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352,
+        #        DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532,
         #        DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG,
         #    ],
         #}
diff --git a/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py b/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py
index 9498dc84cc6991fd2295371842fa8508c961f1bc..c79dde99a4d3c48f2f27ff00451f50aa1af9bee2 100644
--- a/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py
+++ b/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py
@@ -19,6 +19,7 @@ from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.EndPoint import json_endpoint_id
 from common.type_checkers.Checkers import chk_string, chk_type
 from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES
+from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum, get_import_topology
 from device.service.drivers.ietf_l2vpn.TfsDebugApiClient import TfsDebugApiClient
 from .Tools import connection_point, wim_mapping
 from .WimconnectorIETFL2VPN import WimconnectorIETFL2VPN
@@ -59,6 +60,14 @@ class IetfL2VpnDriver(_Driver):
         self.wim = WimconnectorIETFL2VPN(wim, wim_account, config=config)
         self.conn_info = {} # internal database emulating OSM storage provided to WIM Connectors
 
+        # Options are:
+        #    disabled --> just import endpoints as usual
+        #    devices  --> imports sub-devices but not links connecting them.
+        #                 (a remotely-controlled transport domain might exist between them)
+        #    topology --> imports sub-devices and links connecting them.
+        #                 (not supported by XR driver)
+        self.__import_topology = get_import_topology(self.settings, default=ImportTopologyEnum.DEVICES)
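+        # Sketch (assuming get_import_topology reads an 'import_topology' key
+        # from the driver settings): configuring a device with
+        #     {'import_topology': 'topology'}
+        # would import both the sub-devices and the links connecting them.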
+
     def Connect(self) -> bool:
         with self.__lock:
             try:
@@ -93,7 +102,7 @@ class IetfL2VpnDriver(_Driver):
                     chk_string(str_resource_name, resource_key, allow_empty=False)
                     if resource_key == RESOURCE_ENDPOINTS:
                         # return endpoints through debug-api and list-devices method
-                        results.extend(self.dac.get_devices_endpoints())
+                        results.extend(self.dac.get_devices_endpoints(self.__import_topology))
                     elif resource_key == RESOURCE_SERVICES:
                         # return all active connectivity services through the WIM connector
                         reply = self.wim.get_all_active_connectivity_services()
diff --git a/src/device/service/drivers/ietf_l2vpn/TfsDebugApiClient.py b/src/device/service/drivers/ietf_l2vpn/TfsDebugApiClient.py
index 4bf40af030fda990f96efe0ff8ab2ce54f82c312..2d3901695abc4c0124a7f443ffa59f825d4e13bf 100644
--- a/src/device/service/drivers/ietf_l2vpn/TfsDebugApiClient.py
+++ b/src/device/service/drivers/ietf_l2vpn/TfsDebugApiClient.py
@@ -15,8 +15,10 @@
 import logging, requests
 from requests.auth import HTTPBasicAuth
 from typing import Dict, List, Optional
+from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum
 
 GET_DEVICES_URL = '{:s}://{:s}:{:d}/restconf/debug-api/devices'
+GET_LINKS_URL   = '{:s}://{:s}:{:d}/restconf/debug-api/links'
 TIMEOUT = 30
 
 HTTP_OK_CODES = {
@@ -38,9 +40,10 @@ MAPPING_DRIVER = {
     'DEVICEDRIVER_TRANSPORT_API'        : 2,
     'DEVICEDRIVER_P4'                   : 3,
     'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY': 4,
-    'DEVICEDRIVER_ONF_TR_352'           : 5,
+    'DEVICEDRIVER_ONF_TR_532'           : 5,
     'DEVICEDRIVER_XR'                   : 6,
     'DEVICEDRIVER_IETF_L2VPN'           : 7,
+    'DEVICEDRIVER_GNMI_OPENCONFIG'      : 8,
 }
 
 MSG_ERROR = 'Could not retrieve devices in remote TeraFlowSDN instance({:s}). status_code={:s} reply={:s}'
@@ -52,16 +55,23 @@ class TfsDebugApiClient:
         self, address : str, port : int, scheme : str = 'http',
         username : Optional[str] = None, password : Optional[str] = None
     ) -> None:
-        self._url = GET_DEVICES_URL.format(scheme, address, port)
+        self._devices_url = GET_DEVICES_URL.format(scheme, address, port)
+        self._links_url = GET_LINKS_URL.format(scheme, address, port)
         self._auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None
 
-    def get_devices_endpoints(self) -> List[Dict]:
-        reply = requests.get(self._url, timeout=TIMEOUT, verify=False, auth=self._auth)
+    def get_devices_endpoints(self, import_topology : ImportTopologyEnum = ImportTopologyEnum.DEVICES) -> List[Dict]:
+        LOGGER.debug('[get_devices_endpoints] begin')
+        LOGGER.debug('[get_devices_endpoints] import_topology={:s}'.format(str(import_topology)))
+
+        reply = requests.get(self._devices_url, timeout=TIMEOUT, verify=False, auth=self._auth)
         if reply.status_code not in HTTP_OK_CODES:
-            msg = MSG_ERROR.format(str(self._url), str(reply.status_code), str(reply))
+            msg = MSG_ERROR.format(str(self._devices_url), str(reply.status_code), str(reply))
             LOGGER.error(msg)
             raise Exception(msg)
 
+        if import_topology == ImportTopologyEnum.DISABLED:
+            raise Exception('Unsupported import_topology mode: {:s}'.format(str(import_topology)))
+
         result = list()
         for json_device in reply.json()['devices']:
             device_uuid : str = json_device['device_id']['device_uuid']['uuid']
@@ -89,4 +99,29 @@ class TfsDebugApiClient:
                 }
                 result.append((endpoint_url, endpoint_data))
 
+        if import_topology == ImportTopologyEnum.DEVICES:
+            LOGGER.debug('[get_devices_endpoints] devices only; returning')
+            return result
+
+        reply = requests.get(self._links_url, timeout=TIMEOUT, verify=False, auth=self._auth)
+        if reply.status_code not in HTTP_OK_CODES:
+            msg = MSG_ERROR.format(str(self._links_url), str(reply.status_code), str(reply))
+            LOGGER.error(msg)
+            raise Exception(msg)
+
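+        # Mirror the device/endpoint handling above: map each link reported by
+        # the debug-api into a (resource_key, resource_value) tuple.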
+        for json_link in reply.json()['links']:
+            link_uuid : str = json_link['link_id']['link_uuid']['uuid']
+            link_url = '/links/link[{:s}]'.format(link_uuid)
+            link_endpoint_ids = [
+                (json_endpoint_id['device_id']['device_uuid']['uuid'], json_endpoint_id['endpoint_uuid']['uuid'])
+                for json_endpoint_id in json_link['link_endpoint_ids']
+            ]
+            link_data = {
+                'uuid': link_uuid,
+                'name': json_link['name'],
+                'endpoints': link_endpoint_ids,
+            }
+            result.append((link_url, link_data))
+
+        LOGGER.debug('[get_devices_endpoints] topology; returning')
         return result
diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py
index b34efbc8fbcc8e4a340e8c2282268b9f0246fddc..8c6e07b3f00a975a909161006e59e89de0ceaaf3 100644
--- a/src/device/service/drivers/openconfig/OpenConfigDriver.py
+++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py
@@ -109,7 +109,10 @@ class NetconfSessionHandler:
     @RETRY_DECORATOR
     def get(self, filter=None, with_defaults=None): # pylint: disable=redefined-builtin
         with self.__lock:
-            return self.__manager.get(filter=filter, with_defaults=with_defaults)
+            if self.__vendor == 'JUNIPER' and 'component' not in str(filter):
+                return self.__manager.get_config(source="running", filter=filter, with_defaults=with_defaults)
+            else:
+                return self.__manager.get(filter=filter, with_defaults=with_defaults)
 
     @RETRY_DECORATOR
     def edit_config(
diff --git a/src/device/service/drivers/openconfig/templates/Interfaces.py b/src/device/service/drivers/openconfig/templates/Interfaces.py
index 3855db17b45505d4131089b2b9abd995fa221419..3d4c73fc11c686b4d4e181a1f98ed3f5922f7c15 100644
--- a/src/device/service/drivers/openconfig/templates/Interfaces.py
+++ b/src/device/service/drivers/openconfig/templates/Interfaces.py
@@ -22,6 +22,7 @@ LOGGER = logging.getLogger(__name__)
 XPATH_INTERFACES    = "//oci:interfaces/oci:interface"
 XPATH_SUBINTERFACES = ".//oci:subinterfaces/oci:subinterface"
 XPATH_IPV4ADDRESSES = ".//ociip:ipv4/ociip:addresses/ociip:address"
+XPATH_IPV6ADDRESSES = ".//ociip:ipv6/ociip:addresses/ociip:address"
 
 def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
     response = []
@@ -97,6 +98,15 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
 
             #add_value_from_collection(subinterface, 'ipv4_addresses', ipv4_addresses)
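+            # The IPv6 block below mirrors the IPv4 handling above: the address
+            # and prefix length reported under the subinterface state are added
+            # to the subinterface dictionary.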
 
+            for xml_ipv6_address in xml_subinterface.xpath(XPATH_IPV6ADDRESSES, namespaces=NAMESPACES):
+                #LOGGER.info('xml_ipv6_address = {:s}'.format(str(ET.tostring(xml_ipv6_address))))
+
+                address = xml_ipv6_address.find('ociip:state/ociip:ip', namespaces=NAMESPACES)
+                add_value_from_tag(subinterface, 'address_ipv6', address)
+
+                prefix = xml_ipv6_address.find('ociip:state/ociip:prefix-length', namespaces=NAMESPACES)
+                add_value_from_tag(subinterface, 'address_prefix_v6', prefix, cast=int)
+
             if len(subinterface) == 0: continue
             resource_key = '/interface[{:s}]/subinterface[{:s}]'.format(interface['name'], str(subinterface['index']))
             response.append((resource_key, subinterface))
diff --git a/src/device/service/drivers/openconfig/templates/Inventory.py b/src/device/service/drivers/openconfig/templates/Inventory.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ae67ba47dad162b8c8e4a15d3004b27359d4ca2
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/Inventory.py
@@ -0,0 +1,157 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, lxml.etree as ET
+from typing import Any, Dict, List, Tuple
+from .Namespace import NAMESPACES
+from .Tools import add_value_from_tag
+
+LOGGER = logging.getLogger(__name__)
+
+XPATH_PORTS = "//ocp:components/ocp:component"
+
+"""
+#Method Name: parse
+
+#Parameters:
+    
+    - xml_data: [ET.Element] Represents the XML data to be parsed.
+
+# Functionality:
+
+    The parse function of the inventerio class has the functionality to parse
+    an XML document represented by the xml_data parameter and extract specific 
+    information from the XML elements, namely the relevant characteristics of the 
+    components.     
+
+    To generate the template the following steps are performed:
+
+    1) An empty list called response is created to store the results of the analysis.
+
+    2) Iterate over the XML elements that match the pattern specified by the XPATH_PORTS 
+    expression. These elements represent components in the XML document.
+
+    3) For each component element:
+    A dictionary called inventory is initialized that will store the information extracted 
+    from the component.The values of the relevant XML elements are extracted and added to
+    the dictionary.
+
+#Return: 
+    List[Tuple[str, Dict[str, Any]]] The response list containing the tuples (path, dictionary) 
+    with the information extracted from the XML document components is returned.
+"""
+
+def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
+    response = []
+    LOGGER.debug("Parsing Inventory components...")
+    parent_types = {}
+    for xml_component in xml_data.xpath(XPATH_PORTS, namespaces=NAMESPACES):
+        LOGGER.info('xml_component inventory = {:s}'.format(str(ET.tostring(xml_component))))
+        inventory = {}
+        inventory['parent-component-references'] = ''
+        inventory['name'] = ''    
+        inventory['class'] = ''
+        inventory['attributes'] = {}
+        component_reference = []
+
+        component_name = xml_component.find('ocp:name', namespaces=NAMESPACES)
+        if component_name is None or component_name.text is None: continue
+        add_value_from_tag(inventory, 'name', component_name)        
+
+        component_description = xml_component.find('ocp:state/ocp:description', namespaces=NAMESPACES)
+        if component_description is not None:
+            add_value_from_tag(inventory['attributes'], 'description', component_description)
+
+        component_location = xml_component.find('ocp:state/ocp:location', namespaces=NAMESPACES)
+        if component_location is not None:
+            add_value_from_tag(inventory['attributes'], 'location', component_location)
+
+        component_type = xml_component.find('ocp:state/ocp:type', namespaces=NAMESPACES)
+        if component_type is None: continue
+        component_type.text = component_type.text.replace('oc-platform-types:', '')
+        add_value_from_tag(inventory, 'class', component_type)
+
+        if inventory['class'] in ('CPU', 'STORAGE'): continue
+
+        component_empty = xml_component.find('ocp:state/ocp:empty', namespaces=NAMESPACES)
+        if component_empty is not None:
+            add_value_from_tag(inventory['attributes'], 'empty', component_empty)
+
+        component_parent = xml_component.find('ocp:state/ocp:parent', namespaces=NAMESPACES)
+        if component_parent is not None:
+            add_value_from_tag(inventory, 'parent-component-references', component_parent)
+
+        component_HW = xml_component.find('ocp:state/ocp:hardware-version', namespaces=NAMESPACES)
+        if component_HW is not None:
+            add_value_from_tag(inventory['attributes'], 'hardware-rev', component_HW)
+
+        component_firmware_version = xml_component.find('ocp:state/ocp:firmware-version', namespaces=NAMESPACES)
+        if component_firmware_version is not None:
+            add_value_from_tag(inventory['attributes'], 'firmware-rev', component_firmware_version)
+
+        component_SW = xml_component.find('ocp:state/ocp:software-version', namespaces=NAMESPACES)
+        if component_SW is not None:
+            add_value_from_tag(inventory['attributes'], 'software-rev', component_SW)
+
+        component_serial = xml_component.find('ocp:state/ocp:serial-no', namespaces=NAMESPACES)
+        if component_serial is not None:
+            add_value_from_tag(inventory['attributes'], 'serial-num', component_serial)
+
+        component_mfg_name = xml_component.find('ocp:state/ocp:mfg-name', namespaces=NAMESPACES)
+        if component_mfg_name is not None:
+            add_value_from_tag(inventory['attributes'], 'manufacturer-name', component_mfg_name)
+
+        component_removable = xml_component.find('ocp:state/ocp:removable', namespaces=NAMESPACES)
+        if component_removable is not None:
+            add_value_from_tag(inventory['attributes'], 'removable', component_removable)
+
+        component_mfg_date = xml_component.find('ocp:state/ocp:mfg-date', namespaces=NAMESPACES)
+        if component_mfg_date is not None:
+            add_value_from_tag(inventory['attributes'], 'mfg-date', component_mfg_date)
+
+        # Transceiver Information
+        component_serial_t = xml_component.find('ocptr:transceiver/ocptr:state/ocptr:serial-no', namespaces=NAMESPACES)
+        if component_serial_t is not None:
+            add_value_from_tag(inventory['attributes'], 'serial-num', component_serial_t)
+
+        component_present = xml_component.find('ocptr:transceiver/ocptr:state/ocptr:present', namespaces=NAMESPACES)
+        if component_present is not None and 'NOT_PRESENT' in component_present.text: continue
+
+        component_vendor = xml_component.find('ocptr:transceiver/ocptr:state/ocptr:vendor', namespaces=NAMESPACES)
+        if component_vendor is not None:
+            add_value_from_tag(inventory['attributes'], 'vendor', component_vendor)
+
+        component_connector = xml_component.find('ocptr:transceiver/ocptr:state/ocptr:connector-type', namespaces=NAMESPACES)
+        if component_connector is not None:
+            component_connector.text = component_connector.text.replace('oc-opt-types:', '')
+            add_value_from_tag(inventory['attributes'], 'connector-type', component_connector)
+
+        component_form = xml_component.find('ocptr:transceiver/ocptr:state/ocptr:form-factor', namespaces=NAMESPACES)
+        if component_form is not None:
+            component_form.text = component_form.text.replace('oc-opt-types:', '')
+            add_value_from_tag(inventory['attributes'], 'form-factor', component_form)
+
+        if inventory['parent-component-references'] not in parent_types:
+            parent_types[inventory['parent-component-references']] = len(parent_types) + 1
+
+        component_reference.append(parent_types[inventory['parent-component-references']])
+
+        response.append(('/inventory/{:s}'.format(inventory['name']), inventory))
+
+        # Link the classes of all already-parsed components whose path contains this
+        # component's parent reference; the dictionary appended above is mutated in place
+        for tupla in response:
+            if inventory['parent-component-references'] in tupla[0]:
+                component_reference.append(tupla[1]['class'])
+
+        inventory['component-reference'] = component_reference
+
+    return response
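The parent/child bookkeeping at the end of the loop is the least obvious part of the new parser. Below, a toy reproduction with plain dictionaries (component names and classes invented for illustration) shows how component-reference is filled.

# Toy reproduction of the parent_types / component_reference bookkeeping above, with
# invented component names and classes. Note the quirk it shares with the code: an
# empty parent reference is a substring of every path, so it matches all of them.
components = [('CHASSIS-1', '', 'CHASSIS'), ('SLOT-1', 'CHASSIS-1', 'LINECARD')]

response, parent_types = [], {}
for name, parent, klass in components:
    inventory = {'name': name, 'parent-component-references': parent, 'class': klass}
    component_reference = []

    # assign a stable numeric id per distinct parent name
    if parent not in parent_types:
        parent_types[parent] = len(parent_types) + 1
    component_reference.append(parent_types[parent])

    response.append(('/inventory/{:s}'.format(name), inventory))

    # link the classes of already-parsed components whose path contains the parent name
    for path, parsed in response:
        if parent in path:
            component_reference.append(parsed['class'])

    inventory['component-reference'] = component_reference

for path, inv in response:
    print(path, inv['component-reference'])
# -> /inventory/CHASSIS-1 [1, 'CHASSIS']
# -> /inventory/SLOT-1 [2, 'CHASSIS']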
diff --git a/src/device/service/drivers/openconfig/templates/Namespace.py b/src/device/service/drivers/openconfig/templates/Namespace.py
index b70d5c32775075ae299bf18f8ecfc530544efe7b..bdc27a1ff30d5ac18b9233cdd420cd8493e7a419 100644
--- a/src/device/service/drivers/openconfig/templates/Namespace.py
+++ b/src/device/service/drivers/openconfig/templates/Namespace.py
@@ -45,5 +45,5 @@ NAMESPACES = {
     'ocpt2': NAMESPACE_POLICY_TYPES_2,
     'ocrp' : NAMESPACE_ROUTING_POLICY,
     'ocv'  : NAMESPACE_VLAN,
-    'ocptr' : NAMESPACE_PLATFORM_TRANSCEIVER,
+    'ocptr': NAMESPACE_PLATFORM_TRANSCEIVER,
 }
diff --git a/src/device/service/drivers/openconfig/templates/__init__.py b/src/device/service/drivers/openconfig/templates/__init__.py
index 844f9d2d41c6d30c2e9eaaf3bd0ea4e89a991221..279b7b2d3e2437ab41db1a95aef392fd81313342 100644
--- a/src/device/service/drivers/openconfig/templates/__init__.py
+++ b/src/device/service/drivers/openconfig/templates/__init__.py
@@ -19,12 +19,13 @@ from jinja2 import Environment, PackageLoader, select_autoescape
 import paramiko
 from .Tools import generate_templates
 from device.service.driver_api._Driver import (
-    RESOURCE_ENDPOINTS, RESOURCE_INTERFACES,RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_ACL)
+    RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_ACL, RESOURCE_INVENTORY)
 from .EndPoints import parse as parse_endpoints
 from .Interfaces import parse as parse_interfaces, parse_counters
 from .NetworkInstances import parse as parse_network_instances
 from .RoutingPolicy import parse as parse_routing_policy
 from .Acl import parse as parse_acl
+from .Inventory import parse as parse_inventory
 LOGGER = logging.getLogger(__name__)
 
 ALL_RESOURCE_KEYS = [
diff --git a/src/device/service/drivers/openconfig/templates/interfaces_mng/get.xml b/src/device/service/drivers/openconfig/templates/interfaces_mng/get.xml
deleted file mode 100644
index 1003bff154619cedd3e8aecb9d3044aac0b6a0a3..0000000000000000000000000000000000000000
--- a/src/device/service/drivers/openconfig/templates/interfaces_mng/get.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<interfaces xmlns="http://openconfig.net/yang/interfaces">
-    <interface/>
-</interfaces>
\ No newline at end of file
diff --git a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
index 91793230d0626d9a8dc112c6442a7364b6beb1a1..a5c38151c0f44534b96c550860deac495d442c4d 100644
--- a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
+++ b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
@@ -28,7 +28,7 @@ from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_m
 from common.proto.context_pb2 import Empty, Timestamp
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.proto.l3_attackmitigator_pb2 import L3AttackmitigatorOutput
-from common.proto.l3_centralizedattackdetector_pb2 import AttackIPs, AutoFeatures, L3CentralizedattackdetectorMetrics, StatusMessage
+from common.proto.l3_centralizedattackdetector_pb2 import AttackIPs, AutoFeatures, L3CentralizedattackdetectorMetrics, L3CentralizedattackdetectorBatchInput, StatusMessage
 from common.proto.l3_centralizedattackdetector_pb2_grpc import L3CentralizedattackdetectorServicer
 from common.proto.monitoring_pb2 import Kpi, KpiDescriptor
 from common.tools.timestamp.Converters import timestamp_utcnow_to_float
diff --git a/src/l3_distributedattackdetector/service/known_attack_ips.csv b/src/l3_distributedattackdetector/service/known_attack_ips.csv
new file mode 100644
index 0000000000000000000000000000000000000000..254dad83da9342581186335ca63c6ea1e8b2a219
--- /dev/null
+++ b/src/l3_distributedattackdetector/service/known_attack_ips.csv
@@ -0,0 +1 @@
+37.187.95.110,91.121.140.167,94.23.23.52,94.23.247.226,149.202.83.171
\ No newline at end of file
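A minimal sketch of how this list is consumed, mirroring read_known_attack_ips and check_if_connection_is_attack further down in this patch; the connection tuple and the stand-in list are made up for illustration.

# Minimal sketch: load the CSV above and flag a connection whose source or
# destination IP is known-bad. The connection tuple is invented for this demo.
def load_known_attack_ips(path="known_attack_ips.csv"):
    with open(path, "r") as f:
        return f.read().strip().split(",")

def is_attack(conn_id, known_ips):
    # conn_id = (src_ip, src_port, dst_ip, dst_port), as built from a Tstat line
    return conn_id[0] in known_ips or conn_id[2] in known_ips

known = ["37.187.95.110", "91.121.140.167"]  # stand-in for the CSV contents
print(is_attack(("37.187.95.110", "443", "10.0.0.5", "51514"), known))  # -> True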
diff --git a/src/l3_distributedattackdetector/service/l3_distributedattackdetector.py b/src/l3_distributedattackdetector/service/l3_distributedattackdetector.py
index 357f44a9ab2037438252fb0ca40b1a7dc3c74c54..6c9d09dc57d2966538dbe804117e330e06b10bd4 100644
--- a/src/l3_distributedattackdetector/service/l3_distributedattackdetector.py
+++ b/src/l3_distributedattackdetector/service/l3_distributedattackdetector.py
@@ -13,28 +13,26 @@
 # limitations under the License.
 
 import asyncio
-import grpc
 import logging
-import numpy as np
 import os
 import signal
 import time
 from sys import stdout
-from common.proto.context_pb2 import (
-    Empty,
-    ServiceTypeEnum,
-    ContextId,
-)
+
+import grpc
+import numpy as np
+
+from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
 from common.proto.context_pb2_grpc import ContextServiceStub
 from common.proto.l3_centralizedattackdetector_pb2 import (
-    L3CentralizedattackdetectorMetrics,
-    L3CentralizedattackdetectorBatchInput,
     ConnectionMetadata,
     Feature,
+    L3CentralizedattackdetectorBatchInput,
+    L3CentralizedattackdetectorMetrics,
 )
 from common.proto.l3_centralizedattackdetector_pb2_grpc import L3CentralizedattackdetectorStub
 
-#  Setup LOGGER
+# Setup LOGGER
 LOGGER = logging.getLogger("dad_LOGGER")
 LOGGER.setLevel(logging.INFO)
 logFormatter = logging.Formatter(fmt="%(levelname)-8s %(message)s")
@@ -42,112 +40,202 @@ consoleHandler = logging.StreamHandler(stdout)
 consoleHandler.setFormatter(logFormatter)
 LOGGER.addHandler(consoleHandler)
 
+# Define constants
 TSTAT_DIR_NAME = "piped/"
-CENTRALIZED_ATTACK_DETECTOR = "192.168.165.78:10001"
+CONTROLLER_IP = "192.168.165.78"  # Change this to the IP of the controller
+CONTEXT_ID = "admin"  # Change this to the context ID to be used
+CONTEXT_CHANNEL = f"{CONTROLLER_IP}:1010"
+CENTRALIZED_ATTACK_DETECTOR = f"{CONTROLLER_IP}:10001"
 JSON_BLANK = {
     "ip_o": "",  # Client IP
     "port_o": "",  # Client port
     "ip_d": "",  # Server ip
     "port_d": "",  # Server port
-    "flow_id": "",  # Identifier:c_ip,c_port,s_ip,s_port,time_start
+    "flow_id": "",  # Identifier: c_ip,c_port, s_ip,s_port, time_start
     "protocol": "",  # Connection protocol
     "time_start": 0.0,  # Start of connection
     "time_end": 0.0,  # Time of last packet
 }
-
 STOP = False
 IGNORE_FIRST_LINE_TSTAT = True
-
-CONTEXT_ID = "admin"
-CONTEXT_CHANNEL = "192.168.165.78:1010"
 PROFILING = False
 SEND_DATA_IN_BATCHES = False
 BATCH_SIZE = 10
-ATTACK_IPS = ["37.187.95.110", "91.121.140.167", "94.23.23.52", "94.23.247.226", "149.202.83.171"]
 
-class l3_distributedattackdetector():
+
+class l3_distributedattackdetector:
     def __init__(self):
+        """
+        Initializes a Distributed Attack Detector.
+
+        This method initializes a Distributed Attack Detector by setting up instance variables, connecting to the Centralized Attack Detector, obtaining feature IDs, and starting the process traffic loop. It also sets up a signal handler for handling keyboard interrupts.
+
+        Args:
+            None.
+
+        Returns:
+            None.
+        """
+    
         LOGGER.info("Creating Distributed Attack Detector")
-        
+
         self.feature_ids = []
-        
+
         self.cad_features = {}
         self.conn_id = ()
-        
-        self.connections_dict = {} # Dict for storing ALL data
-        self.new_connections = {} # Dict for storing NEW data
-        
+
+        self.connections_dict = {}  # Dictionary for storing all connections data
+        self.new_connections = {}  # Dictionary for storing new connections data
+
+        self.known_attack_ips = self.read_known_attack_ips()
+
         signal.signal(signal.SIGINT, self.handler)
-        
+
         with grpc.insecure_channel(CENTRALIZED_ATTACK_DETECTOR) as channel:
             self.cad = L3CentralizedattackdetectorStub(channel)
-            LOGGER.info("Connected to the centralized attack detector")
+            LOGGER.info("Connected to the Centralized Attack Detector")
 
-            LOGGER.info("Obtaining features...")
+            LOGGER.info("Obtaining feature IDs from the Centralized Attack Detector...")
             self.feature_ids = self.get_features_ids()
             LOGGER.info("Features Ids.: {:s}".format(str(self.feature_ids)))
-            
+
             asyncio.run(self.process_traffic())
-            
+
+    def read_known_attack_ips(self):
+        """
+        Reads a list of known attack IPs from a CSV file.
+
+        This method reads a list of known attack IPs from a CSV file named "known_attack_ips.csv". The method returns a list of strings containing the IP addresses.
+
+        Args:
+            None.
+
+        Returns:
+            List[str]: A list of strings containing the IP addresses of known attack sources.
+        """
+
+        # Resolve the CSV file relative to this module so the working directory does not matter
+        here = os.path.dirname(os.path.abspath(__file__))
+
+        # Read the contents of the file and split the comma-separated values into a list
+        with open(os.path.join(here, "known_attack_ips.csv"), "r") as f:
+            known_attack_ips = f.read().strip().split(",")
+
+        # Return the list of known attack IPs
+        return known_attack_ips
         
-    def handler(self):
+    def handler(self, signum, frame):
+        """
+        Handles a keyboard interrupt signal.
+
+        This method handles a keyboard interrupt signal by setting the `STOP` flag to `True` and logging a message indicating that the program is stopping gracefully. A second interrupt while stopping exits immediately.
+
+        Args:
+            signum (int): The received signal number.
+            frame (FrameType): The current stack frame (unused).
+
+        Returns:
+            None.
+        """
+
+        # STOP is a module-level flag; declare it global so the assignment below does not create a local variable
+        global STOP
         if STOP:
             exit()
-
         STOP = True
 
+        # Log a message indicating that the program is stopping gracefully
         LOGGER.info("Gracefully stopping...")
-    
-    def follow(self, thefile, time_sleep):
+
+    def follow(self, logfile, time_sleep):
         """
-        Generator function that yields new lines in a file
-        It reads the logfie (the opened file)
+        Generator function that yields new lines in a log file.
+
+        This method reads a file object and yields new lines as they are appended to the file. It uses an infinite loop to continuously read the last line of the file, sleeping for `time_sleep` seconds whenever the file has not been updated. If the last line does not end with a newline character, it is buffered in `chunk`; once a newline arrives, the buffered partial line is prepended and the complete line is yielded.
+
+        Args:
+            logfile (TextIO): The file object to read from.
+            time_sleep (float): The time to sleep if the file hasn't been updated.
+
+        Yields:
+            str: The next line in the file.
         """
+
         # seek the end of the file
-        # thefile.seek(0, os.SEEK_END)
+        # logfile.seek(0, os.SEEK_END)
 
-        trozo = ""
+        chunk = ""
 
-        # start infinite loop
+        # start an infinite loop
         while True:
-            # read last line of file
-            line = thefile.readline()
+            # read last line of the file
+            line = logfile.readline()
 
-            # sleep if file hasn't been updated
+            # sleep if the file hasn't been updated
             if not line:
                 time.sleep(time_sleep)
                 continue
+
             if line[-1] != "\n":
-                trozo += line
+                chunk += line
             else:
-                if trozo != "":
-                    line = trozo + line
-                    trozo = ""
+                if chunk != "":
+                    line = chunk + line
+                    chunk = ""
+
                 yield line
 
+    def load_file(self, dirname=TSTAT_DIR_NAME):
+        """
+        Loads the latest Tstat log file.
+
+        This method loads the latest Tstat log file by searching for the most recent directory in the specified `dirname` directory. If a directory is found, the method returns the path to the `log_tcp_temp_complete` file in that directory. If no directory is found, the method logs a message and waits for 5 seconds before trying again.
 
-    def load_file(self, dirname=TSTAT_DIR_NAME):  # - Client side -
+        Args:
+            dirname (str): The name of the directory to search for Tstat log files. Defaults to `TSTAT_DIR_NAME`.
+
+        Returns:
+            str: The path to the latest Tstat log file.
+        """
+        
         while True:
+            # Get the path to the Tstat directory
             here = os.path.dirname(os.path.abspath(__file__))
             tstat_piped = os.path.join(here, dirname)
+
+            # Get a list of all directories in the Tstat directory
             tstat_dirs = os.listdir(tstat_piped)
+
+            # If there are directories in the Tstat directory, find the most recent one and return the path to the log file
             if len(tstat_dirs) > 0:
                 tstat_dirs.sort()
                 new_dir = tstat_dirs[-1]
                 tstat_file = tstat_piped + new_dir + "/log_tcp_temp_complete"
+
                 LOGGER.info("Following: {:s}".format(str(tstat_file)))
+
                 return tstat_file
+            # If there are no directories in the Tstat directory, log a message and wait for 5 seconds before trying again
             else:
-                LOGGER.info("No Tstat directory!")
+                LOGGER.info("No Tstat directory found. Waiting...")
                 time.sleep(5)
 
-
     def process_line(self, line):
         """
-        - Preprocessing before a message per line
-        - Avoids crash when nan are found by generating a 0s array
-        - Returns a list of values
+        Processes a single line of input data and returns a list of feature values.
+
+        Args:
+            line (str): A single line of input data containing feature values separated by spaces.
+
+        Returns:
+            List[float]: A list of feature values extracted from the input line.
+
+        Raises:
+            IndexError: If the input line does not contain enough feature values.
         """
+        
         line = line.split(" ")
 
         try:
@@ -157,67 +245,106 @@ class l3_distributedattackdetector():
                 feature = feature_id - 1
                 values.append(float(line[feature]))
         except IndexError:
-            print("IndexError: {0}".format(line))
+            LOGGER.error("IndexError: {0}".format(line))
 
         return values
 
+    def get_services(self, context_id_str):
+        """
+        Gets the services for a given context ID.
 
-    def get_service_ids(self, context_id_str):
-        with grpc.insecure_channel(CONTEXT_CHANNEL) as channel:
-            stub = ContextServiceStub(channel)
-            context_id = ContextId()
-            context_id.context_uuid.uuid = context_id_str
-            return stub.ListServiceIds(context_id)
+        This method gets the services for a given context ID by calling the ListServices method of the ContextService gRPC stub.
 
+        Args:
+            context_id_str (str): The context ID to get services for.
 
-    def get_services(self, context_id_str):
+        Returns:
+            ServiceList: A response object containing the services.
+        """
+        
         with grpc.insecure_channel(CONTEXT_CHANNEL) as channel:
             stub = ContextServiceStub(channel)
             context_id = ContextId()
             context_id.context_uuid.uuid = context_id_str
-            return stub.ListServices(context_id)
 
+            return stub.ListServices(context_id)
 
     def get_service_id(self, context_id):
-        service_id_list = self.get_service_ids(context_id)
-        service_id = None
-        for s_id in service_id_list.service_ids:
-            if (
-                s_id.service_uuid.uuid == "0eaa0752-c7b6-4c2e-97da-317fbfee5112"
-            ):  # TODO: Change this identifier to the L3VPN service identifier with the real router for the demo v2
-                service_id = s_id
-                break
+        """
+        Gets the service ID for a given context ID.
 
-        return service_id
+        This method gets the service ID for a given context ID by calling the get_services method and searching for the first service in the list with the service type of SERVICETYPE_L3NM.
 
+        Args:
+            context_id (str): The context ID to get the service ID for.
 
-    def get_service_id2(self, context_id):
+        Returns:
+            ServiceId: The service ID of the first L3NM service found, or None if no such service exists.
+        """
+    
         service_list = self.get_services(context_id)
         service_id = None
+
         for s in service_list.services:
             if s.service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
                 service_id = s.service_id
                 break
-            else:
-                pass
-        return service_id
 
+        return service_id
 
     def get_endpoint_id(self, context_id):
+        """
+        Gets the endpoint ID for a given context ID.
+
+        This method gets the endpoint ID for a given context ID by calling the get_services method and searching for the first service in the list with the service type of SERVICETYPE_L3NM.
+
+        Args:
+            context_id (str): The context ID to get the endpoint ID for.
+
+        Returns:
+            EndPointId: The first endpoint ID of the first L3NM service found, or None if no such service exists.
+        """
+    
         service_list = self.get_services(context_id)
         endpoint_id = None
+
         for s in service_list.services:
             if s.service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
                 endpoint_id = s.service_endpoint_ids[0]
                 break
-        return endpoint_id
 
+        return endpoint_id
 
     def get_features_ids(self):
-        return self.cad.GetFeaturesIds(Empty()).auto_features
+        """
+        Gets the feature IDs used by the Centralized Attack Detector model.
+
+        This method gets the feature IDs used by the Centralized Attack Detector model by calling the GetFeaturesIds method of the Centralized Attack Detector gRPC stub.
 
+        Args:
+            None.
+
+        Returns:
+            list: A list of feature IDs.
+        """
+        
+        return self.cad.GetFeaturesIds(Empty()).auto_features
 
     def check_types(self):
+        """
+        Checks the types of the features that will be sent to the Centralized Attack Detector.
+
+        This method checks the types of the Centralized Attack Detector features to ensure that they are of the correct type. If any of the types are incorrect, the method raises an AssertionError.
+
+        Args:
+            None.
+
+        Returns:
+            None.
+        """
+    
         for feature in self.cad_features["features"]:
             assert isinstance(feature, float)
 
@@ -230,8 +357,29 @@ class l3_distributedattackdetector():
         assert isinstance(self.cad_features["connection_metadata"]["time_start"], float)
         assert isinstance(self.cad_features["connection_metadata"]["time_end"], float)
 
-
     def insert_connection(self):
+        """
+        Inserts a new connection into the `connections_dict` instance variable.
+
+        This method inserts a new connection into the `connections_dict` instance variable. The method uses the `conn_id` instance variable to create a new dictionary entry for the connection. If the connection already exists in the dictionary, the method updates the `time_end` value of the existing entry. If the connection doesn't exist in the dictionary, the method creates a new entry with the following keys:
+        - "ip_o": The source IP address of the connection.
+        - "port_o": The source port of the connection.
+        - "ip_d": The destination IP address of the connection.
+        - "port_d": The destination port of the connection.
+        - "flow_id": The flow ID of the connection.
+        - "service_id": The service ID of the connection.
+        - "endpoint_id": The endpoint ID of the connection.
+        - "protocol": The protocol of the connection.
+        - "time_start": The start time of the connection.
+        - "time_end": The end time of the connection.
+
+        Args:
+            None.
+
+        Returns:
+            None.
+        """
+        
         try:
             self.connections_dict[self.conn_id]["time_end"] = time.time()
         except KeyError:
@@ -241,19 +389,53 @@ class l3_distributedattackdetector():
             self.connections_dict[self.conn_id]["ip_o"] = self.conn_id[0]
             self.connections_dict[self.conn_id]["port_o"] = self.conn_id[1]
             self.connections_dict[self.conn_id]["flow_id"] = ":".join(self.conn_id)
-            self.connections_dict[self.conn_id]["service_id"] = self.get_service_id2(CONTEXT_ID)
+            self.connections_dict[self.conn_id]["service_id"] = self.get_service_id(CONTEXT_ID)
             self.connections_dict[self.conn_id]["endpoint_id"] = self.get_endpoint_id(CONTEXT_ID)
             self.connections_dict[self.conn_id]["protocol"] = "TCP"
             self.connections_dict[self.conn_id]["ip_d"] = self.conn_id[2]
             self.connections_dict[self.conn_id]["port_d"] = self.conn_id[3]
 
-
     def check_if_connection_is_attack(self):
-        if self.conn_id[0] in ATTACK_IPS or self.conn_id[2] in ATTACK_IPS:
-            LOGGER.info("Attack detected. Origin: {0}, destination: {1}".format(self.conn_id[0], self.conn_id[2]))
+        """
+        Checks if a connection is an attack based on known attack IP addresses.
+
+        This method checks if a connection is an attack based on known attack IP addresses. The method uses the `conn_id` and `known_attack_ips` instance variables to determine if the source or destination IP address of the connection is in the list of known attack IP addresses. If either IP address is in the list, the method logs a message indicating that an attack has been detected.
 
+        Args:
+            None.
+
+        Returns:
+            None.
+        """
+    
+        if self.conn_id[0] in self.known_attack_ips or self.conn_id[2] in self.known_attack_ips:
+            LOGGER.info("Attack detected. Origin IP address: {0}, destination IP address: {1}".format(self.conn_id[0], self.conn_id[2]))
 
     def create_cad_features(self):
+        """
+        Creates a dictionary of features and connection metadata for the Centralized Attack Detector.
+
+        This method creates a dictionary of features and connection metadata for the Centralized Attack Detector. The method uses the `new_connections` and `connections_dict` instance variables to obtain the necessary data. The resulting dictionary contains the following keys:
+        - "features": A list of the first 10 features of the connection.
+        - "connection_metadata": A dictionary containing the following keys:
+            - "ip_o": The source IP address of the connection.
+            - "port_o": The source port of the connection.
+            - "ip_d": The destination IP address of the connection.
+            - "port_d": The destination port of the connection.
+            - "flow_id": The flow ID of the connection.
+            - "service_id": The service ID of the connection.
+            - "endpoint_id": The endpoint ID of the connection.
+            - "protocol": The protocol of the connection.
+            - "time_start": The start time of the connection.
+            - "time_end": The end time of the connection.
+
+        Args:
+            None.
+
+        Returns:
+            None.
+        """
+        
         self.cad_features = {
             "features": self.new_connections[self.conn_id][0:10],
             "connection_metadata": {
@@ -270,8 +452,19 @@ class l3_distributedattackdetector():
             },
         }
 
-
     async def send_batch_async(self, metrics_list_pb):
+        """
+        Sends a batch of traffic data to a Centralized Attack Detector.
+
+        This method sends a batch of traffic data to a Centralized Attack Detector for analysis. The method creates a `L3CentralizedattackdetectorBatchInput` object from the provided `metrics_list_pb`, and sends the batch using an executor to run the `AnalyzeBatchConnectionStatistics` method in a separate thread. The method returns `None`.
+
+        Args:
+            metrics_list_pb (List[L3CentralizedattackdetectorMetrics]): A list of traffic metrics to send to the Centralized Attack Detector.
+
+        Returns:
+            None.
+        """
+        
         loop = asyncio.get_running_loop()
 
         # Create metrics batch
@@ -279,18 +472,27 @@ class l3_distributedattackdetector():
         metrics_batch.metrics.extend(metrics_list_pb)
 
         # Send batch
-        future = loop.run_in_executor(
-            None, self.cad.AnalyzeBatchConnectionStatistics, metrics_batch
-        )
+        future = loop.run_in_executor(None, self.cad.AnalyzeBatchConnectionStatistics, metrics_batch)
 
         try:
             await future
         except Exception as e:
             LOGGER.error(f"Error sending batch: {e}")
 
-
     async def send_data(self, metrics_list_pb, send_data_times):
-        # Send to CAD
+        """
+        Sends traffic data to a Centralized Attack Detector.
+
+        This method sends traffic data to a Centralized Attack Detector for analysis. If the `SEND_DATA_IN_BATCHES` flag is set to `True`, the data is sent in batches of size `BATCH_SIZE`. Otherwise, the data is sent one metric at a time. The method returns the updated `metrics_list_pb` and `send_data_times` arrays.
+
+        Args:
+            metrics_list_pb (List[L3CentralizedattackdetectorMetrics]): A list of traffic metrics to send to the Centralized Attack Detector.
+            send_data_times (np.ndarray): An array of times it took to send each batch of data.
+
+        Returns:
+            Tuple[List[L3CentralizedattackdetectorMetrics], np.ndarray]: A tuple containing the updated `metrics_list_pb` and `send_data_times` arrays.
+        """
+        
         if SEND_DATA_IN_BATCHES:
             if len(metrics_list_pb) == BATCH_SIZE:
                 send_data_time_start = time.time()
@@ -311,8 +513,16 @@ class l3_distributedattackdetector():
 
         return metrics_list_pb, send_data_times
 
-
     async def process_traffic(self):
+        """
+        Processes traffic data from a Tstat log file.
+
+        This method reads traffic data from a Tstat log file, processes each line of data, and sends the resulting metrics to a Centralized Attack Detector. It runs indefinitely until the `STOP` flag is set to `True`.
+
+        Args:
+            None.
+
+        Returns:
+            None.
+        """
+
         LOGGER.info("Loading Tstat log file...")
         logfile = open(self.load_file(), "r")
 
@@ -326,35 +536,36 @@ class l3_distributedattackdetector():
         metrics_list_pb = []
 
         LOGGER.info("Starting to process data...")
-        
+
         index = 0
         while True:
             line = next(loglines, None)
 
             while line is None:
                 LOGGER.info("Waiting for new data...")
+
                 time.sleep(1 / 100)
                 line = next(loglines, None)
+
             if index == 0 and IGNORE_FIRST_LINE_TSTAT:
                 index = index + 1
                 continue
+
             if STOP:
                 break
 
             num_lines += 1
             start = time.time()
             line_id = line.split(" ")
+
             self.conn_id = (line_id[0], line_id[1], line_id[14], line_id[15])
             self.new_connections[self.conn_id] = self.process_line(line)
 
             self.check_if_connection_is_attack()
-
             self.insert_connection()
-
             self.create_cad_features()
-            
             self.check_types()
-            
+
             connection_metadata = ConnectionMetadata(**self.cad_features["connection_metadata"])
             metrics = L3CentralizedattackdetectorMetrics()
 
@@ -368,9 +579,9 @@ class l3_distributedattackdetector():
 
             metrics_list_pb, send_data_times = await self.send_data(metrics_list_pb, send_data_times)
 
-            index = index + 1
-            
+            index += 1
+
             process_time.append(time.time() - start)
-            
+
             if num_lines % 10 == 0:
-                LOGGER.info(f"Number of lines: {num_lines} - Average processing time: {sum(process_time) / num_lines}")
\ No newline at end of file
+                LOGGER.info(f"Number of lines: {num_lines} - Average processing time: {sum(process_time) / num_lines}")
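For context, the core pattern behind send_batch_async is pushing a blocking gRPC call onto the default thread-pool executor so the asyncio loop stays responsive. A self-contained sketch follows, with a sleep standing in for AnalyzeBatchConnectionStatistics.

# Self-contained sketch of the executor pattern used by send_batch_async: the blocking
# stub call runs on the default thread pool so the asyncio loop is not blocked.
# blocking_analyze stands in for the gRPC AnalyzeBatchConnectionStatistics call.
import asyncio
import time

def blocking_analyze(batch):
    time.sleep(0.1)  # simulate the network round-trip of the blocking gRPC call
    return 'analyzed {:d} metrics'.format(len(batch))

async def send_batch(batch):
    loop = asyncio.get_running_loop()
    future = loop.run_in_executor(None, blocking_analyze, batch)
    try:
        return await future
    except Exception as e:
        print('Error sending batch: {}'.format(e))

print(asyncio.run(send_batch([1, 2, 3])))  # -> analyzed 3 metrics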
diff --git a/src/monitoring/service/EventTools.py b/src/monitoring/service/EventTools.py
index a840cde455fd37599bc02e8802c9cf41b4515428..468e0c58cc43d1c3ecf1232ebf2a9ed46a91e93b 100644
--- a/src/monitoring/service/EventTools.py
+++ b/src/monitoring/service/EventTools.py
@@ -18,6 +18,7 @@ from common.method_wrappers.ServiceExceptions import ServiceException
 from common.proto import monitoring_pb2
 from common.proto.context_pb2 import ConfigActionEnum, DeviceOperationalStatusEnum, Empty, EventTypeEnum
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from monitoring.client.MonitoringClient import MonitoringClient
 from monitoring.service.MonitoringServiceServicerImpl import LOGGER
@@ -43,7 +44,8 @@ class EventsDeviceCollector:
 
         self._device_thread   = threading.Thread(target=self._collect, args=(self._device_stream,), daemon=False)
 
-        self._device_to_state : Dict[str, DeviceOperationalStatusEnum] = dict()
+        #self._device_to_state : Dict[str, DeviceOperationalStatusEnum] = dict()
+        self._device_endpoint_monitored : Dict[str, Dict[str, bool]] = dict()
         self._name_mapping = name_mapping
 
     def grpc_server_on(self):
@@ -79,33 +81,26 @@ class EventsDeviceCollector:
             kpi_id_list = []
 
             while True:
-                # LOGGER.info('getting Kpi by KpiID')
                 try:
                     event = self.get_event(block=True, timeout=0.5)
 
                     event_type = event.event.event_type
                     device_uuid = event.device_id.device_uuid.uuid
                     if event_type in {EventTypeEnum.EVENTTYPE_REMOVE}:
-                        self._device_to_state.pop(device_uuid, None)
+                        LOGGER.debug('Ignoring REMOVE event: {:s}'.format(grpc_message_to_json_string(event)))
+                        self._device_endpoint_monitored.pop(device_uuid, None)
                         continue
 
                     if event_type not in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}:
-                        # Unknown event type
+                        LOGGER.debug('Ignoring UNKNOWN event type: {:s}'.format(grpc_message_to_json_string(event)))
                         continue
 
                     device = self._context_client.GetDevice(event.device_id)
                     self._name_mapping.set_device_name(device_uuid, device.name)
 
-                    old_operational_status = self._device_to_state.get(device_uuid, DEVICE_OP_STATUS_UNDEFINED)
-                    device_was_not_enabled = (old_operational_status in DEVICE_OP_STATUS_NOT_ENABLED)
-
-                    new_operational_status = device.device_operational_status
-                    device_is_enabled = (new_operational_status == DEVICE_OP_STATUS_ENABLED)
-                    self._device_to_state[device_uuid] = new_operational_status
-
-                    activate_monitoring = device_was_not_enabled and device_is_enabled
-                    if not activate_monitoring:
-                        # device is not ready for monitoring
+                    device_op_status = device.device_operational_status
+                    if device_op_status != DEVICE_OP_STATUS_ENABLED:
+                        LOGGER.debug('Ignoring Device not enabled: {:s}'.format(grpc_message_to_json_string(device)))
                         continue
 
                     enabled_endpoint_names = set()
@@ -120,26 +115,41 @@ class EventsDeviceCollector:
                         if not json_resource_value['enabled']: continue
                         enabled_endpoint_names.add(json_resource_value['name'])
 
+                    endpoints_monitored = self._device_endpoint_monitored.setdefault(device_uuid, dict())
                     for endpoint in device.device_endpoints:
                         endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
                         endpoint_name_or_uuid = endpoint.name
                         if endpoint_name_or_uuid is None or len(endpoint_name_or_uuid) == 0:
                             endpoint_name_or_uuid = endpoint_uuid
-                        if endpoint_name_or_uuid not in enabled_endpoint_names: continue
 
                         self._name_mapping.set_endpoint_name(endpoint_uuid, endpoint.name)
 
-                        for value in endpoint.kpi_sample_types:
-                            if value == KPISAMPLETYPE_UNKNOWN: continue
-
-                            kpi_descriptor = monitoring_pb2.KpiDescriptor()
-                            kpi_descriptor.kpi_description = device.device_type
-                            kpi_descriptor.kpi_sample_type = value
-                            kpi_descriptor.device_id.CopyFrom(device.device_id)         # pylint: disable=no-member
-                            kpi_descriptor.endpoint_id.CopyFrom(endpoint.endpoint_id)   # pylint: disable=no-member
+                        endpoint_was_monitored = endpoints_monitored.get(endpoint_uuid, False)
+                        endpoint_is_enabled = (endpoint_name_or_uuid in enabled_endpoint_names)
+
+                        if not endpoint_was_monitored and endpoint_is_enabled:
+                            # activate
+                            for value in endpoint.kpi_sample_types:
+                                if value == KPISAMPLETYPE_UNKNOWN: continue
+
+                                kpi_descriptor = monitoring_pb2.KpiDescriptor()
+                                kpi_descriptor.kpi_description = device.device_type
+                                kpi_descriptor.kpi_sample_type = value
+                                kpi_descriptor.device_id.CopyFrom(device.device_id)         # pylint: disable=no-member
+                                kpi_descriptor.endpoint_id.CopyFrom(endpoint.endpoint_id)   # pylint: disable=no-member
+
+                                kpi_id = self._monitoring_client.SetKpi(kpi_descriptor)
+                                kpi_id_list.append(kpi_id)
+                            endpoints_monitored[endpoint_uuid] = True
+                        else:
+                            MSG = 'Not implemented condition: event={:s} device={:s} endpoint={:s}' + \
+                                  ' endpoint_was_monitored={:s} endpoint_is_enabled={:s}'
+                            LOGGER.warning(MSG.format(
+                                grpc_message_to_json_string(event), grpc_message_to_json_string(device),
+                                grpc_message_to_json_string(endpoint), str(endpoint_was_monitored),
+                                str(endpoint_is_enabled)
+                            ))
 
-                            kpi_id = self._monitoring_client.SetKpi(kpi_descriptor)
-                            kpi_id_list.append(kpi_id)
                 except queue.Empty:
                     break
 
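The essence of the new EventTools logic is per-endpoint bookkeeping: KPIs are activated only on the transition from not-monitored to enabled. A sketch with plain dicts standing in for the gRPC objects:

# Sketch of the per-endpoint bookkeeping introduced above: KPIs are activated only
# the first time an endpoint of an enabled device shows up as enabled.
device_endpoint_monitored = {}

def on_device_event(device_uuid, enabled_endpoint_uuids):
    # mirrors self._device_endpoint_monitored.setdefault(device_uuid, dict())
    monitored = device_endpoint_monitored.setdefault(device_uuid, {})
    for endpoint_uuid in enabled_endpoint_uuids:
        if not monitored.get(endpoint_uuid, False):
            print('activating KPIs for {:s}/{:s}'.format(device_uuid, endpoint_uuid))
            monitored[endpoint_uuid] = True

on_device_event('R1', ['eth0'])           # activates eth0
on_device_event('R1', ['eth0', 'eth1'])   # activates only eth1; eth0 is already monitored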
diff --git a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
index 5a95f0e6edd200251a27b1e3571b719ebd102e1b..e7fb00029f15d82dbe80c8fff13d098ca5b29f30 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
@@ -2270,8 +2270,8 @@ public class Serializer {
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_P4;
             case IETF_NETWORK_TOPOLOGY:
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY;
-            case ONF_TR_352:
-                return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352;
+            case ONF_TR_532:
+                return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532;
             case XR:
                 return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR;
             case IETF_L2VPN:
@@ -2293,8 +2293,8 @@ public class Serializer {
                 return DeviceDriverEnum.P4;
             case DEVICEDRIVER_IETF_NETWORK_TOPOLOGY:
                 return DeviceDriverEnum.IETF_NETWORK_TOPOLOGY;
-            case DEVICEDRIVER_ONF_TR_352:
-                return DeviceDriverEnum.ONF_TR_352;
+            case DEVICEDRIVER_ONF_TR_532:
+                return DeviceDriverEnum.ONF_TR_532;
             case DEVICEDRIVER_XR:
                 return DeviceDriverEnum.XR;
             case DEVICEDRIVER_IETF_L2VPN:
diff --git a/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java b/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
index ad763e35dfeef71c2f9f73dbf51785a3e03c0e0d..e4198b9d03a4afcaef71a6311a244072ded2eab0 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java
@@ -22,7 +22,7 @@ public enum DeviceDriverEnum {
     TRANSPORT_API,
     P4,
     IETF_NETWORK_TOPOLOGY,
-    ONF_TR_352,
+    ONF_TR_532,
     XR,
     IETF_L2VPN
 }
diff --git a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
index 32055a1820365ffc0f048fa70e99df5f3369fd81..fb60ef8d1a82417f858fe63845b76b27099f488e 100644
--- a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
+++ b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
@@ -3602,8 +3602,8 @@ class SerializerTest {
                         DeviceDriverEnum.IETF_NETWORK_TOPOLOGY,
                         ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY),
                 Arguments.of(
-                        DeviceDriverEnum.ONF_TR_352,
-                        ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352),
+                        DeviceDriverEnum.ONF_TR_532,
+                        ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532),
                 Arguments.of(DeviceDriverEnum.XR, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR),
                 Arguments.of(
                         DeviceDriverEnum.IETF_L2VPN,
diff --git a/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java b/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java
index a605e30de68ae66866a78d53863412937ceea890..be75963507751fbf5a8a1c92101de4023fd50e63 100644
--- a/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java
+++ b/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java
@@ -170,9 +170,9 @@ public final class ContextOuterClass {
      */
     DEVICEDRIVER_IETF_NETWORK_TOPOLOGY(4),
     /**
-     * <code>DEVICEDRIVER_ONF_TR_352 = 5;</code>
+     * <code>DEVICEDRIVER_ONF_TR_532 = 5;</code>
      */
-    DEVICEDRIVER_ONF_TR_352(5),
+    DEVICEDRIVER_ONF_TR_532(5),
     /**
      * <code>DEVICEDRIVER_XR = 6;</code>
      */
@@ -213,9 +213,9 @@ public final class ContextOuterClass {
      */
     public static final int DEVICEDRIVER_IETF_NETWORK_TOPOLOGY_VALUE = 4;
     /**
-     * <code>DEVICEDRIVER_ONF_TR_352 = 5;</code>
+     * <code>DEVICEDRIVER_ONF_TR_532 = 5;</code>
      */
-    public static final int DEVICEDRIVER_ONF_TR_352_VALUE = 5;
+    public static final int DEVICEDRIVER_ONF_TR_532_VALUE = 5;
     /**
      * <code>DEVICEDRIVER_XR = 6;</code>
      */
@@ -259,7 +259,7 @@ public final class ContextOuterClass {
         case 2: return DEVICEDRIVER_TRANSPORT_API;
         case 3: return DEVICEDRIVER_P4;
         case 4: return DEVICEDRIVER_IETF_NETWORK_TOPOLOGY;
-        case 5: return DEVICEDRIVER_ONF_TR_352;
+        case 5: return DEVICEDRIVER_ONF_TR_532;
         case 6: return DEVICEDRIVER_XR;
         case 7: return DEVICEDRIVER_IETF_L2VPN;
         case 8: return DEVICEDRIVER_GNMI_OPENCONFIG;
@@ -74215,7 +74215,7 @@ public final class ContextOuterClass {
       "RIVER_OPENCONFIG\020\001\022\036\n\032DEVICEDRIVER_TRANS" +
       "PORT_API\020\002\022\023\n\017DEVICEDRIVER_P4\020\003\022&\n\"DEVIC" +
       "EDRIVER_IETF_NETWORK_TOPOLOGY\020\004\022\033\n\027DEVIC" +
-      "EDRIVER_ONF_TR_352\020\005\022\023\n\017DEVICEDRIVER_XR\020" +
+      "EDRIVER_ONF_TR_532\020\005\022\023\n\017DEVICEDRIVER_XR\020" +
       "\006\022\033\n\027DEVICEDRIVER_IETF_L2VPN\020\007\022 \n\034DEVICE" +
       "DRIVER_GNMI_OPENCONFIG\020\010*\217\001\n\033DeviceOpera" +
       "tionalStatusEnum\022%\n!DEVICEOPERATIONALSTA" +
diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py
index 1b22c5c42e908e9b9455358edd2abf54442628f5..430e25938601d522187046b0ebd4cad6971261bb 100644
--- a/src/service/service/service_handler_api/FilterFields.py
+++ b/src/service/service/service_handler_api/FilterFields.py
@@ -33,7 +33,7 @@ DEVICE_DRIVER_VALUES = {
     DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API,
     DeviceDriverEnum.DEVICEDRIVER_P4,
     DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY,
-    DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352,
+    DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532,
     DeviceDriverEnum.DEVICEDRIVER_XR,
     DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN,
     DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG,
diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py
index 7ea0d4f627b5d6010f6e40135f6005471efe8d71..cb926e5b767ae56ea2024aad7cb9afa632f9d6bb 100644
--- a/src/service/service/service_handlers/__init__.py
+++ b/src/service/service/service_handlers/__init__.py
@@ -71,7 +71,7 @@ SERVICE_HANDLERS = [
     (MicrowaveServiceHandler, [
         {
             FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_L2NM,
-            FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352],
+            FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532],
         }
     ]),
     (P4ServiceHandler, [
diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh
index 08a8dfaad5cdb0cb28fafb618f9b932e630bd114..0c1f57387ee66ed9809695d14a9a8dad7ccda4c5 100755
--- a/src/tests/ofc22/deploy_specs.sh
+++ b/src/tests/ofc22/deploy_specs.sh
@@ -20,7 +20,24 @@
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice compute webui"
+
+# Uncomment to activate Monitoring
+export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+export TFS_COMPONENTS="${TFS_COMPONENTS} automation"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
@@ -31,6 +48,12 @@ export TFS_K8S_NAMESPACE="tfs"
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 
+# Uncomment to monitor performance of components
+export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
@@ -43,6 +66,12 @@ export TFS_SKIP_BUILD=""
 # Set the namespace where CockroackDB will be deployed.
 export CRDB_NAMESPACE="crdb"
 
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
 # Set the database username to be used by Context.
 export CRDB_USERNAME="tfs"
 
@@ -68,6 +97,12 @@ export CRDB_REDEPLOY=""
 # Set the namespace where NATS will be deployed.
 export NATS_NAMESPACE="nats"
 
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4222"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8222"
+
 # Disable flag for re-deploying NATS from scratch.
 export NATS_REDEPLOY=""
 
@@ -77,6 +112,15 @@ export NATS_REDEPLOY=""
 # Set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE="qdb"
 
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8812"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9009"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9000"
+
 # Set the database username to be used for QuestDB.
 export QDB_USERNAME="admin"
 
@@ -86,8 +130,20 @@ export QDB_PASSWORD="quest"
 # Set the table name to be used by Monitoring for KPIs.
 export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
 
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
 # Disable flag for dropping tables if they exist.
 export QDB_DROP_TABLES_IF_EXIST="YES"
 
 # Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/tests/ofc22/descriptors_emulated.json b/src/tests/ofc22/descriptors_emulated.json
index 1e16b71169d721fe1de0befe212e187861ddf2de..e0ac8b3ecee916fa1c7e57adcd5d32f0b180924b 100644
--- a/src/tests/ofc22/descriptors_emulated.json
+++ b/src/tests/ofc22/descriptors_emulated.json
@@ -8,7 +8,7 @@
     "devices": [
         {
             "device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "device_type": "emu-packet-router",
-            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
@@ -22,7 +22,7 @@
         },
         {
             "device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "device_type": "emu-packet-router",
-            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
@@ -36,7 +36,7 @@
         },
         {
             "device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "device_type": "emu-packet-router",
-            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
@@ -50,7 +50,7 @@
         },
         {
             "device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "device_type": "emu-packet-router",
-            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
@@ -64,7 +64,7 @@
         },
         {
             "device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "device_type": "emu-open-line-system",
-            "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [],
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
             "device_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
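The operational-status codes toggled in this descriptor come from DeviceOperationalStatusEnum in context.proto; changing 1 to 2 marks the emulated devices as enabled from the start:

# DeviceOperationalStatusEnum values (context.proto)
DEVICEOPERATIONALSTATUS_UNDEFINED = 0
DEVICEOPERATIONALSTATUS_DISABLED  = 1
DEVICEOPERATIONALSTATUS_ENABLED   = 2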
diff --git a/src/tests/ofc22/tests/test_functional_bootstrap.py b/src/tests/ofc22/tests/test_functional_bootstrap.py
index ca1882aaa22ff1ac20d0b1927199a6594a6c441a..f149604929bfa305377545b2fdab50751dded7a1 100644
--- a/src/tests/ofc22/tests/test_functional_bootstrap.py
+++ b/src/tests/ofc22/tests/test_functional_bootstrap.py
@@ -14,7 +14,7 @@
 
 import logging, time
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId, Empty
+from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty
 from common.proto.monitoring_pb2 import KpiDescriptorList
 from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
 from common.tools.object_factory.Context import json_context_id
@@ -46,6 +46,27 @@ def test_scenario_bootstrap(
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
 
+def test_scenario_devices_enabled(
+    context_client : ContextClient,         # pylint: disable=redefined-outer-name
+) -> None:
+    """
+    This test validates that the devices are enabled.
+    """
+    DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+
+    num_devices = -1
+    num_devices_enabled, num_retry = 0, 0
+    while (num_devices != num_devices_enabled) and (num_retry < 10):
+        time.sleep(1.0)
+        response = context_client.ListDevices(Empty())
+        num_devices = len(response.devices)
+        num_devices_enabled = 0
+        for device in response.devices:
+            if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue
+            num_devices_enabled += 1
+        LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices))
+        num_retry += 1
+    assert num_devices_enabled == num_devices
 
 def test_scenario_kpis_created(
     context_client : ContextClient,         # pylint: disable=redefined-outer-name
@@ -67,7 +88,7 @@ def test_scenario_kpis_created(
     LOGGER.info('Num KPIs expected: {:d}'.format(num_kpis_expected))
 
     num_kpis_created, num_retry = 0, 0
-    while (num_kpis_created != num_kpis_expected) and (num_retry < 5):
+    while (num_kpis_created != num_kpis_expected) and (num_retry < 10):
         response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty())
         num_kpis_created = len(response.kpi_descriptor_list)
         LOGGER.info('Num KPIs created: {:d}'.format(num_kpis_created))
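
Both tests in this file now hand-roll the same poll-with-retry loop (up to 10 attempts, roughly 1 second apart). An equivalent generic helper, shown only as an illustrative sketch with a hypothetical name, could factor that idiom out:

    import time
    from typing import Callable

    def wait_until(predicate : Callable[[], bool], max_retries : int = 10, delay : float = 1.0) -> bool:
        # Poll the predicate up to max_retries times, sleeping `delay` seconds between attempts.
        for _ in range(max_retries):
            if predicate(): return True
            time.sleep(delay)
        return False
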
diff --git a/src/tests/ofc23/deploy_specs_child.sh b/src/tests/ofc23/deploy_specs_child.sh
index 4d2b3502294925d82f675263fd6bddea62ec181a..94c0d4de00c56c8b9b435eb734e112c4f34ab25e 100755
--- a/src/tests/ofc23/deploy_specs_child.sh
+++ b/src/tests/ofc23/deploy_specs_child.sh
@@ -20,8 +20,22 @@
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-#automation monitoring load_generator
-export TFS_COMPONENTS="context device pathcomp service slice compute webui"
+export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
@@ -32,6 +46,12 @@ export TFS_K8S_NAMESPACE="tfs-child"
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="ofc23/tfs-ingress-child.yaml"
 
+# Monitor performance of components (comment out to disable)
+export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
@@ -116,3 +136,12 @@ export QDB_DROP_TABLES_IF_EXIST="YES"
 
 # Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
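
With these two variables set, deploy/expose_dashboard.sh maps the in-cluster Prometheus and Grafana services to the given host ports. A quick smoke test after deployment (a sketch, assuming the services are reachable on localhost) might be:

    # Probe the exposed observability endpoints (standard Prometheus/Grafana health URLs)
    curl -fsS "http://localhost:${PROM_EXT_PORT_HTTP}/-/ready"    && echo "Prometheus is ready"
    curl -fsS "http://localhost:${GRAF_EXT_PORT_HTTP}/api/health" && echo "Grafana is healthy"
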
diff --git a/src/tests/ofc23/deploy_specs_parent.sh b/src/tests/ofc23/deploy_specs_parent.sh
index 808f4e28734be71e6eb7fb2aced39211fd8e7f24..6bd4dc33b324fb7a301c13695901c28d2375d2d7 100755
--- a/src/tests/ofc23/deploy_specs_parent.sh
+++ b/src/tests/ofc23/deploy_specs_parent.sh
@@ -20,8 +20,22 @@
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-#automation monitoring load_generator
-export TFS_COMPONENTS="context device pathcomp service slice compute webui"
+export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
@@ -32,6 +46,12 @@ export TFS_K8S_NAMESPACE="tfs-parent"
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="ofc23/tfs-ingress-parent.yaml"
 
+# Monitor performance of components (comment out to disable)
+export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
@@ -116,3 +136,12 @@ export QDB_DROP_TABLES_IF_EXIST="YES"
 
 # Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/tests/ofc23/deploy_specs_sligrp.sh b/src/tests/ofc23/deploy_specs_sligrp.sh
index 90bea4567bd35d845abf943670f8aa33070dff57..0422c7b103f0fd07cfa9e3b0ea69dcb9a4f7cd05 100755
--- a/src/tests/ofc23/deploy_specs_sligrp.sh
+++ b/src/tests/ofc23/deploy_specs_sligrp.sh
@@ -20,9 +20,23 @@
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-#automation monitoring load_generator
 export TFS_COMPONENTS="context device pathcomp service slice webui load_generator"
 
+# Activate Monitoring (comment out to disable)
+export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
+
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
@@ -32,6 +46,12 @@ export TFS_K8S_NAMESPACE="tfs-sligrp"
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 
+# Monitor performance of components (comment out to disable)
+export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
@@ -116,3 +136,12 @@ export QDB_DROP_TABLES_IF_EXIST="YES"
 
 # Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/tests/scenario2/Scenario.md b/src/tests/scenario2/Scenario.md
index 8dad4691ade669522b5c82a5e4ed07e5d0279492..964c8b2d089cd9316dd5d7f8061712160408bc48 100644
--- a/src/tests/scenario2/Scenario.md
+++ b/src/tests/scenario2/Scenario.md
@@ -1,47 +1,64 @@
-# Scenario:
-
-- 4 TFS instances
-
-    - domain D1 (source for e-2-e service)
-        5 routers + 1 DC
-        R1@D1/2 <--> R2@D1/1
-        R2@D1/3 <--> R3@D1/2
-        R2@D1/5 <--> R5@D1/2
-        R3@D1/4 <--> R4@D1/3
-        R4@D1/5 <--> R5@D1/4
-        R5@D1/1 <--> R1@D1/5
-        R1@D1/100 <--> DCGW@D1/eth1
-
-    - domain D2 (transit for e-2-e service)
-        6 routers
-        R1@D2/2 <--> R2@D2/1
-        R1@D2/6 <--> R6@D2/1
-        R1@D2/5 <--> R5@D2/1
-        R2@D2/3 <--> R3@D2/2
-        R2@D2/4 <--> R4@D2/2
-        R2@D2/5 <--> R5@D2/2
-        R2@D2/6 <--> R6@D2/2
-        R3@D2/6 <--> R6@D2/3
-        R4@D2/5 <--> R5@D2/4
-
-    - domain D3 (transit for e-2-e service)
-        4 routers
-        R1@D3/2 <--> R2@D3/1
-        R2@D3/3 <--> R3@D3/2
-        R3@D3/4 <--> R4@D3/3
-        R4@D3/1 <--> R1@D3/4
-        R2@D3/4 <--> R4@D3/2
-
-    - domain D4 (end for e-2-e service)
-        3 routers
-        R1@D4/2 <--> R2@D4/1
-        R1@D4/3 <--> R3@D4/1
-        R2@D4/3 <--> R3@D4/2
-        R3@D4/100 <--> DCGW@D4/eth1
-
-    - interdomain links
-        R4@D1/10 <--> R1@D2/10
-        R5@D1/10 <--> R1@D3/10
-        R4@D2/10 <--> R2@D4/10
-        R5@D2/10 <--> R2@D3/10
-        R3@D3/10 <--> R1@D4/10
+# Scenario Description
+
+This scenario is composed of 4 TeraFlowSDN instances.
+Each instance has its own local network topology, detailed below.
+In addition, each instance exposes an abstracted view of its local network domain.
+Finally, the domains are interconnected through the inter-domain links detailed below.
+
+## Domain D1 (endpoint for the end-to-end interdomain slice)
+
+Domain D1 is composed of 5 emulated packet routers (Rx@D1) and 1 emulated DataCenter (DCGW@D1).
+The DCGW@D1 is a termination endpoint for the end-to-end interdomain slice.
+The internal domain connectivity is defined as follows:
+- R1@D1/2 <--> R2@D1/1
+- R2@D1/3 <--> R3@D1/2
+- R2@D1/5 <--> R5@D1/2
+- R3@D1/4 <--> R4@D1/3
+- R4@D1/5 <--> R5@D1/4
+- R5@D1/1 <--> R1@D1/5
+- R1@D1/100 <--> DCGW@D1/eth1
+
+## Domain D2 (transit for the end-to-end interdomain slice)
+
+Domain D2 is composed of 6 emulated packet routers (Rx@D2).
+This domain behaves as a transit domain for the end-to-end interdomain slice.
+The internal domain connectivity is defined as follows:
+- R1@D2/2 <--> R2@D2/1
+- R1@D2/6 <--> R6@D2/1
+- R1@D2/5 <--> R5@D2/1
+- R2@D2/3 <--> R3@D2/2
+- R2@D2/4 <--> R4@D2/2
+- R2@D2/5 <--> R5@D2/2
+- R2@D2/6 <--> R6@D2/2
+- R3@D2/6 <--> R6@D2/3
+- R4@D2/5 <--> R5@D2/4
+
+## Domain D3 (transit for the end-to-end interdomain slice)
+
+Domain D3 is composed of 4 emulated packet routers (Rx@D3).
+This domain behaves as a transit domain for the end-to-end interdomain slice.
+The internal domain connectivity is defined as follows:
+- R1@D3/2 <--> R2@D3/1
+- R2@D3/3 <--> R3@D3/2
+- R3@D3/4 <--> R4@D3/3
+- R4@D3/1 <--> R1@D3/4
+- R2@D3/4 <--> R4@D3/2
+
+## Domain D4 (endpoint for the end-to-end interdomain slice)
+
+Domain D4 is composed of 3 emulated packet routers (Rx@D4) and 1 emulated DataCenter (DCGW@D4).
+The DCGW@D4 is a termination endpoint for the end-to-end interdomain slice.
+The internal domain connectivity is defined as follows:
+- R1@D4/2 <--> R2@D4/1
+- R1@D4/3 <--> R3@D4/1
+- R2@D4/3 <--> R3@D4/2
+- R3@D4/100 <--> DCGW@D4/eth1
+
+## Inter-domain Connectivity
+
+The 4 domains are interconnected through the following inter-domain links:
+- R4@D1/10 <--> R1@D2/10
+- R5@D1/10 <--> R1@D3/10
+- R4@D2/10 <--> R2@D4/10
+- R5@D2/10 <--> R2@D3/10
+- R3@D3/10 <--> R1@D4/10
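
The five inter-domain links above form a connected mesh over the four domains. A minimal sketch (illustrative only, not part of the test suite) that encodes the links and checks full reachability:

    from collections import deque

    # Inter-domain links as (domain, domain) pairs, taken from the list above
    LINKS = [('D1', 'D2'), ('D1', 'D3'), ('D2', 'D4'), ('D2', 'D3'), ('D3', 'D4')]

    def reachable(src : str) -> set:
        # Breadth-first search over the undirected inter-domain graph
        adj = {}
        for a, b in LINKS:
            adj.setdefault(a, set()).add(b)
            adj.setdefault(b, set()).add(a)
        seen, queue = {src}, deque([src])
        while queue:
            node = queue.popleft()
            for nxt in adj[node]:
                if nxt not in seen:
                    seen.add(nxt)
                    queue.append(nxt)
        return seen

    DOMAINS = {'D1', 'D2', 'D3', 'D4'}
    assert all(reachable(domain) == DOMAINS for domain in DOMAINS)
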
diff --git a/src/tests/scenario2/deploy_all.sh b/src/tests/scenario2/deploy_all.sh
index 541612db431fd73e58fd7c1699df97342c11ea70..1eac2e4da701d2f2c60f8690c4784b59522a8b72 100755
--- a/src/tests/scenario2/deploy_all.sh
+++ b/src/tests/scenario2/deploy_all.sh
@@ -23,8 +23,8 @@ kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml
 kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml
 kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml
 
-# Delete MockBlockchain
-#kubectl delete namespace tfs-bchain
+# Delete MockBlockchain (comment out if using a real blockchain)
+kubectl delete namespace tfs-bchain
 
 # Create secondary ingress controllers
 kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml
@@ -32,8 +32,8 @@ kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml
 kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml
 kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml
 
-# Create MockBlockchain
-#./deploy_mock_blockchain.sh
+# Create MockBlockchain (comment out if using a real blockchain)
+./deploy/mock_blockchain.sh
 
 # Deploy TFS for Domain 1
 source nfvsdn22/deploy_specs_dom1.sh
diff --git a/src/tests/scenario2/deploy_specs_dom1.sh b/src/tests/scenario2/deploy_specs_dom1.sh
index cfe8a3bf63d875b4c579e36ff6a904e0f4b62e02..7dd777fbee12537729e8408fe671074a1e9b19f1 100755
--- a/src/tests/scenario2/deploy_specs_dom1.sh
+++ b/src/tests/scenario2/deploy_specs_dom1.sh
@@ -1,10 +1,11 @@
+#!/bin/bash
 # Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,24 +13,142 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Activate InterDomain (required for this multi-domain scenario)
+export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain"
+
+# Activate DLT (required for this multi-domain scenario)
+export TFS_COMPONENTS="${TFS_COMPONENTS} dlt"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs-dom1"
 
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom1.yaml"
 
-# Set the neew Grafana admin password
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
-export TFS_SKIP_BUILD="NO"
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_dom1"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-dom1"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4223"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8223"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-dom1"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8813"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9011"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9001"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
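
All four domains share one CockroachDB cluster (same CRDB_NAMESPACE and SQL port) but isolate their state in per-domain databases (tfs_dom1 ... tfs_dom4). A quick way to confirm the separation once everything is up (connection flags are illustrative assumptions; they depend on how deploy/crdb.sh configures the single-node cluster):

    # List the per-domain databases on the shared CockroachDB (port from CRDB_EXT_PORT_SQL);
    # --insecure is an assumption for the 'single' development mode, adjust if the cluster is secured
    cockroach sql --insecure --host=localhost:26257 -e "SHOW DATABASES;"
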
diff --git a/src/tests/scenario2/deploy_specs_dom2.sh b/src/tests/scenario2/deploy_specs_dom2.sh
index 7034c22cdcdc93b6c6fc0e5227e0ef38bda95a55..bb6ce2f0c390d3253696e62bf23aa85b0d16782e 100755
--- a/src/tests/scenario2/deploy_specs_dom2.sh
+++ b/src/tests/scenario2/deploy_specs_dom2.sh
@@ -1,10 +1,11 @@
+#!/bin/bash
 # Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,24 +13,142 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Activate InterDomain (required for this multi-domain scenario)
+export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain"
+
+# Activate DLT (required for this multi-domain scenario)
+export TFS_COMPONENTS="${TFS_COMPONENTS} dlt"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs-dom2"
 
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom2.yaml"
 
-# Set the neew Grafana admin password
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+# Enable skip-build flag to reuse the Docker images already built for Domain 1.
 export TFS_SKIP_BUILD="YES"
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_dom2"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-dom2"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4224"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8224"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-dom2"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8814"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9012"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9002"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/tests/scenario2/deploy_specs_dom3.sh b/src/tests/scenario2/deploy_specs_dom3.sh
index 044301418405ba20dfaf00cd58f9a1a15e7e62a7..797d55894a143308935664f2c879260dfd2760ec 100755
--- a/src/tests/scenario2/deploy_specs_dom3.sh
+++ b/src/tests/scenario2/deploy_specs_dom3.sh
@@ -1,10 +1,11 @@
+#!/bin/bash
 # Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,24 +13,142 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Activate InterDomain (required for this multi-domain scenario)
+export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain"
+
+# Activate DLT (required for this multi-domain scenario)
+export TFS_COMPONENTS="${TFS_COMPONENTS} dlt"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs-dom3"
 
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom3.yaml"
 
-# Set the neew Grafana admin password
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+# Enable skip-build flag to reuse the Docker images already built for Domain 1.
 export TFS_SKIP_BUILD="YES"
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_dom3"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-dom3"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4225"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8225"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-dom3"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8815"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9013"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9003"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/tests/scenario2/deploy_specs_dom4.sh b/src/tests/scenario2/deploy_specs_dom4.sh
index 9e26ace470c81b0bdccfa83bf4eb7369970c981b..d2fe2abfa981d498558d263ca053093e017225d2 100755
--- a/src/tests/scenario2/deploy_specs_dom4.sh
+++ b/src/tests/scenario2/deploy_specs_dom4.sh
@@ -1,10 +1,11 @@
+#!/bin/bash
 # Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,24 +13,142 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Activate InterDomain (required for this multi-domain scenario)
+export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain"
+
+# Activate DLT (required for this multi-domain scenario)
+export TFS_COMPONENTS="${TFS_COMPONENTS} dlt"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs-dom4"
 
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom4.yaml"
 
-# Set the neew Grafana admin password
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+# Enable skip-build flag to reuse the Docker images already built for Domain 1.
 export TFS_SKIP_BUILD="YES"
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_dom4"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-dom4"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4226"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8226"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-dom4"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8816"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9014"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9004"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
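
Across the four domain specs, the NATS and QuestDB external ports follow a fixed per-domain offset so all instances can coexist on a single host, while CockroachDB, Prometheus, and Grafana are shared. A sketch of the convention derived from the values above (DOM_IDX is a hypothetical helper variable, not used by the scripts themselves):

    DOM_IDX=4  # 1..4 for tfs-dom1..tfs-dom4
    export NATS_EXT_PORT_CLIENT=$((4222 + DOM_IDX))  # 4223..4226
    export NATS_EXT_PORT_HTTP=$((8222 + DOM_IDX))    # 8223..8226
    export QDB_EXT_PORT_SQL=$((8812 + DOM_IDX))      # 8813..8816
    export QDB_EXT_PORT_ILP=$((9010 + DOM_IDX))      # 9011..9014
    export QDB_EXT_PORT_HTTP=$((9000 + DOM_IDX))     # 9001..9004
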
diff --git a/src/tests/scenario2/dump_logs.sh b/src/tests/scenario2/dump_logs.sh
index 7b1dc9d17aabcf8866b76ed1acdb367eee0e3b51..c7acedbf613b66f3ec08bd1628e8e1ab76b9bc5e 100755
--- a/src/tests/scenario2/dump_logs.sh
+++ b/src/tests/scenario2/dump_logs.sh
@@ -13,64 +13,70 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-rm -rf tmp/exec
-
 echo "Collecting logs for MockBlockChain..."
-mkdir -p tmp/exec/mbc
-kubectl --namespace tfs-bchain logs deployments/mock-blockchain server > tmp/exec/mbc/mock-blockchain.log
+rm -rf tmp/tfs-bchain/exec
+mkdir -p tmp/tfs-bchain/exec
+kubectl --namespace tfs-bchain logs deployments/mock-blockchain server > tmp/tfs-bchain/exec/mock-blockchain.log
 printf "\n"
 
 echo "Collecting logs for Domain 1..."
-mkdir -p tmp/exec/dom1
-kubectl --namespace tfs-dom1 logs deployments/contextservice server > tmp/exec/dom1/context.log
-kubectl --namespace tfs-dom1 logs deployments/deviceservice server > tmp/exec/dom1/device.log
-kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/exec/dom1/service.log
-kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/exec/dom1/pathcomp-frontend.log
-kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/exec/dom1/pathcomp-backend.log
-kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/exec/dom1/slice.log
-kubectl --namespace tfs-dom1 logs deployments/interdomainservice server > tmp/exec/dom1/interdomain.log
-kubectl --namespace tfs-dom1 logs deployments/dltservice connector > tmp/exec/dom1/dlt-connector.log
-kubectl --namespace tfs-dom1 logs deployments/dltservice gateway > tmp/exec/dom1/dlt-gateway.log
+rm -rf tmp/tfs-dom1/exec
+mkdir -p tmp/tfs-dom1/exec
+kubectl --namespace tfs-dom1 logs deployments/contextservice server > tmp/tfs-dom1/exec/context.log
+kubectl --namespace tfs-dom1 logs deployments/deviceservice server > tmp/tfs-dom1/exec/device.log
+kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/tfs-dom1/exec/service.log
+kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/tfs-dom1/exec/pathcomp-frontend.log
+kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/tfs-dom1/exec/pathcomp-backend.log
+kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/tfs-dom1/exec/slice.log
+kubectl --namespace tfs-dom1 logs deployments/interdomainservice server > tmp/tfs-dom1/exec/interdomain.log
+kubectl --namespace tfs-dom1 logs deployments/dltservice connector > tmp/tfs-dom1/exec/dlt-connector.log
+kubectl --namespace tfs-dom1 logs deployments/dltservice gateway > tmp/tfs-dom1/exec/dlt-gateway.log
+kubectl --namespace tfs-dom1 logs deployments/webuiservice server > tmp/tfs-dom1/exec/webui.log
 printf "\n"
 
 echo "Collecting logs for Domain 2..."
-mkdir -p tmp/exec/dom2
-kubectl --namespace tfs-dom2 logs deployments/contextservice server > tmp/exec/dom2/context.log
-kubectl --namespace tfs-dom2 logs deployments/deviceservice server > tmp/exec/dom2/device.log
-kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/exec/dom2/service.log
-kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/exec/dom2/pathcomp-frontend.log
-kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/exec/dom2/pathcomp-backend.log
-kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/exec/dom2/slice.log
-kubectl --namespace tfs-dom2 logs deployments/interdomainservice server > tmp/exec/dom2/interdomain.log
-kubectl --namespace tfs-dom2 logs deployments/dltservice connector > tmp/exec/dom2/dlt-connector.log
-kubectl --namespace tfs-dom2 logs deployments/dltservice gateway > tmp/exec/dom2/dlt-gateway.log
+rm -rf tmp/tfs-dom2/exec
+mkdir -p tmp/tfs-dom2/exec
+kubectl --namespace tfs-dom2 logs deployments/contextservice server > tmp/tfs-dom2/exec/context.log
+kubectl --namespace tfs-dom2 logs deployments/deviceservice server > tmp/tfs-dom2/exec/device.log
+kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/tfs-dom2/exec/service.log
+kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/tfs-dom2/exec/pathcomp-frontend.log
+kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/tfs-dom2/exec/pathcomp-backend.log
+kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/tfs-dom2/exec/slice.log
+kubectl --namespace tfs-dom2 logs deployments/interdomainservice server > tmp/tfs-dom2/exec/interdomain.log
+kubectl --namespace tfs-dom2 logs deployments/dltservice connector > tmp/tfs-dom2/exec/dlt-connector.log
+kubectl --namespace tfs-dom2 logs deployments/dltservice gateway > tmp/tfs-dom2/exec/dlt-gateway.log
+kubectl --namespace tfs-dom2 logs deployments/webuiservice server > tmp/tfs-dom2/exec/webui.log
 printf "\n"
 
 echo "Collecting logs for Domain 3..."
-mkdir -p tmp/exec/dom3
-kubectl --namespace tfs-dom3 logs deployments/contextservice server > tmp/exec/dom3/context.log
-kubectl --namespace tfs-dom3 logs deployments/deviceservice server > tmp/exec/dom3/device.log
-kubectl --namespace tfs-dom3 logs deployments/serviceservice server > tmp/exec/dom3/service.log
-kubectl --namespace tfs-dom3 logs deployments/pathcompservice frontend > tmp/exec/dom3/pathcomp-frontend.log
-kubectl --namespace tfs-dom3 logs deployments/pathcompservice backend > tmp/exec/dom3/pathcomp-backend.log
-kubectl --namespace tfs-dom3 logs deployments/sliceservice server > tmp/exec/dom3/slice.log
-kubectl --namespace tfs-dom3 logs deployments/interdomainservice server > tmp/exec/dom3/interdomain.log
-kubectl --namespace tfs-dom3 logs deployments/dltservice connector > tmp/exec/dom3/dlt-connector.log
-kubectl --namespace tfs-dom3 logs deployments/dltservice gateway > tmp/exec/dom3/dlt-gateway.log
+rm -rf tmp/tfs-dom3/exec
+mkdir -p tmp/tfs-dom3/exec
+kubectl --namespace tfs-dom3 logs deployments/contextservice server > tmp/tfs-dom3/exec/context.log
+kubectl --namespace tfs-dom3 logs deployments/deviceservice server > tmp/tfs-dom3/exec/device.log
+kubectl --namespace tfs-dom3 logs deployments/serviceservice server > tmp/tfs-dom3/exec/service.log
+kubectl --namespace tfs-dom3 logs deployments/pathcompservice frontend > tmp/tfs-dom3/exec/pathcomp-frontend.log
+kubectl --namespace tfs-dom3 logs deployments/pathcompservice backend > tmp/tfs-dom3/exec/pathcomp-backend.log
+kubectl --namespace tfs-dom3 logs deployments/sliceservice server > tmp/tfs-dom3/exec/slice.log
+kubectl --namespace tfs-dom3 logs deployments/interdomainservice server > tmp/tfs-dom3/exec/interdomain.log
+kubectl --namespace tfs-dom3 logs deployments/dltservice connector > tmp/tfs-dom3/exec/dlt-connector.log
+kubectl --namespace tfs-dom3 logs deployments/dltservice gateway > tmp/tfs-dom3/exec/dlt-gateway.log
+kubectl --namespace tfs-dom3 logs deployments/webuiservice server > tmp/tfs-dom3/exec/webui.log
 printf "\n"
 
 echo "Collecting logs for Domain 4..."
-mkdir -p tmp/exec/dom4
-kubectl --namespace tfs-dom4 logs deployments/contextservice server > tmp/exec/dom4/context.log
-kubectl --namespace tfs-dom4 logs deployments/deviceservice server > tmp/exec/dom4/device.log
-kubectl --namespace tfs-dom4 logs deployments/serviceservice server > tmp/exec/dom4/service.log
-kubectl --namespace tfs-dom4 logs deployments/pathcompservice frontend > tmp/exec/dom4/pathcomp-frontend.log
-kubectl --namespace tfs-dom4 logs deployments/pathcompservice backend > tmp/exec/dom4/pathcomp-backend.log
-kubectl --namespace tfs-dom4 logs deployments/sliceservice server > tmp/exec/dom4/slice.log
-kubectl --namespace tfs-dom4 logs deployments/interdomainservice server > tmp/exec/dom4/interdomain.log
-kubectl --namespace tfs-dom4 logs deployments/dltservice connector > tmp/exec/dom4/dlt-connector.log
-kubectl --namespace tfs-dom4 logs deployments/dltservice gateway > tmp/exec/dom4/dlt-gateway.log
+rm -rf tmp/tfs-dom4/exec
+mkdir -p tmp/tfs-dom4/exec
+kubectl --namespace tfs-dom4 logs deployments/contextservice server > tmp/tfs-dom4/exec/context.log
+kubectl --namespace tfs-dom4 logs deployments/deviceservice server > tmp/tfs-dom4/exec/device.log
+kubectl --namespace tfs-dom4 logs deployments/serviceservice server > tmp/tfs-dom4/exec/service.log
+kubectl --namespace tfs-dom4 logs deployments/pathcompservice frontend > tmp/tfs-dom4/exec/pathcomp-frontend.log
+kubectl --namespace tfs-dom4 logs deployments/pathcompservice backend > tmp/tfs-dom4/exec/pathcomp-backend.log
+kubectl --namespace tfs-dom4 logs deployments/sliceservice server > tmp/tfs-dom4/exec/slice.log
+kubectl --namespace tfs-dom4 logs deployments/interdomainservice server > tmp/tfs-dom4/exec/interdomain.log
+kubectl --namespace tfs-dom4 logs deployments/dltservice connector > tmp/tfs-dom4/exec/dlt-connector.log
+kubectl --namespace tfs-dom4 logs deployments/dltservice gateway > tmp/tfs-dom4/exec/dlt-gateway.log
+kubectl --namespace tfs-dom4 logs deployments/webuiservice server > tmp/tfs-dom4/exec/webui.log
 printf "\n"
 
 echo "Done!"
diff --git a/src/tests/scenario2/reset.sh b/src/tests/scenario2/reset.sh
index 5f4a3b8e5b8a58d8f2acf7c60cde1e77c2e1873a..f9e7ecab2d4693ef161ae9349e13a3ad0200c97c 100755
--- a/src/tests/scenario2/reset.sh
+++ b/src/tests/scenario2/reset.sh
@@ -14,10 +14,38 @@
 # limitations under the License.
 
 
+# Destroy all replicas of all microservices
+
 kubectl --namespace tfs-dom1 scale --replicas=0 \
     deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
     deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
 
+kubectl --namespace tfs-dom2 scale --replicas=0 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom3 scale --replicas=0 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom4 scale --replicas=0 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+# Create a single replica per microservice
+
 kubectl --namespace tfs-dom1 scale --replicas=1 \
     deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
     deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom2 scale --replicas=1 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom3 scale --replicas=1 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom4 scale --replicas=1 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
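
Scaling every deployment to 0 and straight back to 1 recreates the pods, but the script returns before they are ready. If a synchronization point were needed, a hedged addition (illustrative, not part of the patch) could wait for the rollouts to complete:

    # Wait until every deployment in each namespace reports ready (illustrative sketch)
    for NS in tfs-dom1 tfs-dom2 tfs-dom3 tfs-dom4; do
        for DEPLOY in contextservice deviceservice pathcompservice serviceservice \
                      sliceservice dltservice interdomainservice webuiservice; do
            kubectl --namespace "${NS}" rollout status "deployment/${DEPLOY}" --timeout=300s
        done
    done
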
diff --git a/src/tests/scenario2/show_deploy.sh b/src/tests/scenario2/show_deploy.sh
index 2aa8de873cf22e75be830c713fc379df9df154a4..20bbfaacef39b7b9b9c18facc3e20382e526377a 100755
--- a/src/tests/scenario2/show_deploy.sh
+++ b/src/tests/scenario2/show_deploy.sh
@@ -24,3 +24,27 @@ printf "\n"
 echo "Deployment Ingress:"
 kubectl --namespace tfs-dom1 get ingress
 printf "\n"
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-dom2 get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-dom2 get ingress
+printf "\n"
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-dom3 get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-dom3 get ingress
+printf "\n"
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-dom4 get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-dom4 get ingress
+printf "\n"
diff --git a/src/tests/scenario3/l3/deploy.sh b/src/tests/scenario3/l3/deploy.sh
index e8e02b026d42ea16d5df29da8a15c291f421e52c..4f4f8baed2c29b290142f30960f8432926cef6a8 100755
--- a/src/tests/scenario3/l3/deploy.sh
+++ b/src/tests/scenario3/l3/deploy.sh
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-source deploy_specs.sh
+source src/tests/scenario3/l3/deploy_specs.sh
 ./deploy/all.sh
 source tfs_runtime_env_vars.sh
 ofc22/run_test_01_bootstrap.sh
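
The sourced path is now given relative to the repository root rather than the script's own directory, so the script is expected to be launched from the root of the controller checkout, for example (checkout path is hypothetical):

    cd ~/tfs-ctrl                        # hypothetical repo checkout root
    ./src/tests/scenario3/l3/deploy.sh
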
diff --git a/src/webui/requirements.in b/src/webui/requirements.in
index b4a158d394bc2de67af1e0e99e922df08104f736..9facefbbae3c20b90304803e9ccfa4ebf4011fbb 100644
--- a/src/webui/requirements.in
+++ b/src/webui/requirements.in
@@ -17,3 +17,4 @@ Flask-WTF==1.0.0
 flask-healthz==0.0.3
 flask-unittest==0.1.2
 lorem-text==2.1
+werkzeug==2.3.7
\ No newline at end of file
diff --git a/src/webui/service/device/forms.py b/src/webui/service/device/forms.py
index e884e96a511ab90625bda257075c80adce4406cd..bcd5804d32927763344d08371320fdde5f2fcab7 100644
--- a/src/webui/service/device/forms.py
+++ b/src/webui/service/device/forms.py
@@ -27,7 +27,7 @@ class AddDeviceForm(FlaskForm):
     device_drivers_transport_api = BooleanField('TRANSPORT_API')
     device_drivers_p4 = BooleanField('P4')
     device_drivers_ietf_network_topology = BooleanField('IETF_NETWORK_TOPOLOGY')
-    device_drivers_onf_tr_352 = BooleanField('ONF_TR_352')
+    device_drivers_onf_tr_532 = BooleanField('ONF_TR_532')
     device_drivers_xr = BooleanField('XR')
     device_drivers_ietf_l2vpn = BooleanField('IETF L2VPN')
     device_drivers_gnmi_openconfig = BooleanField('GNMI OPENCONFIG')
diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py
index 4590c7f01a24e801ecc775ad0b22cf0dcdea3452..ce15c7abaa527191e87dcaeee5b91599014c1e72 100644
--- a/src/webui/service/device/routes.py
+++ b/src/webui/service/device/routes.py
@@ -116,8 +116,8 @@ def add():
             device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_P4)
         if form.device_drivers_ietf_network_topology.data:
             device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY)
-        if form.device_drivers_onf_tr_352.data:
-            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352)
+        if form.device_drivers_onf_tr_532.data:
+            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532)
         if form.device_drivers_xr.data:
             device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_XR)
         if form.device_drivers_ietf_l2vpn.data:
@@ -149,6 +149,16 @@ def detail(device_uuid: str):
 
     return render_template(
         'device/detail.html', device=device_obj, dde=DeviceDriverEnum, dose=DeviceOperationalStatusEnum)
+
+@device.route('inventory/<path:device_uuid>', methods=['GET', 'POST'])
+def inventory(device_uuid: str):
+    context_client.connect()
+    device_obj = get_device(context_client, device_uuid, rw_copy=False)
+    if device_obj is None:
+        flash('Device({:s}) not found'.format(str(device_uuid)), 'danger')
+        device_obj = Device()
+    context_client.close()
+    return render_template('device/inventory.html', device=device_obj)
 
 @device.get('<path:device_uuid>/delete')
 def delete(device_uuid):
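
The new inventory view renders device/inventory.html (added below) for a given device UUID. Assuming the device blueprint keeps its usual mount point in the WebUI, the page would be fetched roughly like this (host, port, and URL prefix are assumptions, not taken from this patch):

    # DEVICE_UUID is a placeholder; R1-EMU matches a device from the emulated descriptors above
    DEVICE_UUID="R1-EMU"
    curl -fsS "http://127.0.0.1/webui/device/inventory/${DEVICE_UUID}"
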
diff --git a/src/webui/service/templates/device/add.html b/src/webui/service/templates/device/add.html
index c9165667dd8c089e91400f652177b4ca4ec98010..c115657aa08828849172345ca50caaeb4150fe0f 100644
--- a/src/webui/service/templates/device/add.html
+++ b/src/webui/service/templates/device/add.html
@@ -87,7 +87,7 @@
                 <br />
                 {{ form.device_drivers_p4 }} {{ form.device_drivers_p4.label(class="col-sm-3 col-form-label") }}
                 {{ form.device_drivers_ietf_network_topology }} {{form.device_drivers_ietf_network_topology.label(class="col-sm-3 col-form-label") }}
-                {{ form.device_drivers_onf_tr_352 }} {{ form.device_drivers_onf_tr_352.label(class="col-sm-3 col-form-label") }}
+                {{ form.device_drivers_onf_tr_532 }} {{ form.device_drivers_onf_tr_532.label(class="col-sm-3 col-form-label") }}
                 <br />
                 {{ form.device_drivers_xr }} {{ form.device_drivers_xr.label(class="col-sm-3 col-form-label") }}
                 {{ form.device_drivers_ietf_l2vpn }} {{ form.device_drivers_ietf_l2vpn.label(class="col-sm-3 col-form-label") }}
diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html
index c35ae163d3f8344f1ebb49241cc15a4afa3401d5..a9c069ac3a526af46392ffd2f23205a7604dbb7d 100644
--- a/src/webui/service/templates/device/detail.html
+++ b/src/webui/service/templates/device/detail.html
@@ -87,7 +87,6 @@
         </table>
     </div>
 </div>
-</div>
 
 <b>Configurations:</b>
 
diff --git a/src/webui/service/templates/device/home.html b/src/webui/service/templates/device/home.html
index 53434196f85c3a8c79fe9b861204e9bd8c6a5d8f..e356fd4fbeccc6e735d9723b8b1ca3e5fcf865ec 100644
--- a/src/webui/service/templates/device/home.html
+++ b/src/webui/service/templates/device/home.html
@@ -50,6 +50,7 @@
             <th scope="col">Status</th>
             <th scope="col">Config Rules</th>
             <th scope="col"></th>
+            <th scope="col"></th>
           </tr>
         </thead>
         <tbody>
@@ -74,6 +75,15 @@
                             </svg>
                         </a>
                     </td>
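+                    {# Extra column: icon link to the device's component inventory view #}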
+                    <td>
+                        <a href="{{ url_for('device.inventory', device_uuid=device.device_id.device_uuid.uuid) }}">
+                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-pci-card" viewBox="0 0 16 16">
+                                <path d="M0 1.5A.5.5 0 0 1 .5 1h1a.5.5 0 0 1 .5.5V4h13.5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-.5.5H2v2.5a.5.5 0 0 1-1 0V2H.5a.5.5 0 0 1-.5-.5Z"/>
+                                <path d="M3 12.5h3.5v1a.5.5 0 0 1-.5.5H3.5a.5.5 0 0 1-.5-.5v-1Zm4 0h4v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1Z"/>
+                              </svg>
+                        </a>
+                    </td>
                 </tr>
                 {% endfor %}
             {% else %}
diff --git a/src/webui/service/templates/device/inventory.html b/src/webui/service/templates/device/inventory.html
new file mode 100644
index 0000000000000000000000000000000000000000..17c14785a89658b59e07373bc7d939e234937631
--- /dev/null
+++ b/src/webui/service/templates/device/inventory.html
@@ -0,0 +1,153 @@
+<!--
+    Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+   
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+   
+         http://www.apache.org/licenses/LICENSE-2.0
+   
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+   -->
+
+{% extends 'base.html' %}
+
+{% block content %}
+<style>
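+    /* Tree-view styles: caret arrows rotate when expanded; nested lists stay hidden until toggled */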
+    ul,
+    #myUL {
+        list-style-type: none;
+    }
+
+    #myUL {
+        margin: 0;
+        padding: 0;
+    }
+
+    .caret {
+        cursor: pointer;
+        -webkit-user-select: none;
+        /* Safari 3.1+ */
+        -moz-user-select: none;
+        /* Firefox 2+ */
+        -ms-user-select: none;
+        /* IE 10+ */
+        user-select: none;
+    }
+
+    .caret::before {
+        content: "\25B6";
+        color: black;
+        display: inline-block;
+        margin-right: 6px;
+    }
+
+    .caret-down::before {
+        -ms-transform: rotate(90deg);
+        /* IE 9 */
+        -webkit-transform: rotate(90deg);
+        /* Safari */
+        transform: rotate(90deg);
+    }
+
+    .nested {
+        display: none;
+    }
+
+    .active {
+        display: block;
+    }
+</style>
+
+<h1>Device {{ device.name }} ({{ device.device_id.device_uuid.uuid }})</h1>
+
+<div class="row mb-3">
+    <div class="col-sm-3">
+        <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('device.home') }}'">
+            <i class="bi bi-box-arrow-in-left"></i>
+            Back to device list
+        </button>
+    </div>
+</div>
+
+<br>
+<div class="row mb-3">
+    <div class="col-sm-3">
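+        {# Collapsible tree: root/CHASSIS components at the top level, child components nested underneath #}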
+        <ul id="myUL">
+            <li><span class="caret">Components</span>
+                <ul class="nested">
+                    {% for item in (device.components|sort(true, attribute='name')) %}
+                    {% if item.parent|length < 1 or item.type == 'CHASSIS' %}
+                    <li><span class="caret">{{ item.name }}</span>
+                        <ul class="nested">
+                            {% for comp in (device.components|sort(true, attribute='name')) %}
+                            {% if item.name == comp.parent %}
+                            <li>{{ comp.name }}</li>
+                            {% endif %}
+                            {% endfor %}
+                        </ul>
+                    </li>
+                    {% endif %}
+                    {% endfor %}
+                </ul>
+            </li>
+        </ul>
+
+        <script>
+            // Expand or collapse a nested component list when its caret is clicked
+            var togglers = document.getElementsByClassName("caret");
+            for (var i = 0; i < togglers.length; i++) {
+                togglers[i].addEventListener("click", function () {
+                    this.parentElement.querySelector(".nested").classList.toggle("active");
+                    this.classList.toggle("caret-down");
+                });
+            }
+        </script>
+
+    </div>
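+    {# Tabular view of all components: UUID, name, type, parent, and raw attributes #}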
+    {% if device.components|length > 0 %}
+    <div class="col-sm-8">
+        <table class="table table-striped table-hover">
+            <thead>
+                <tr>
+                    <th scope="col">Component UUID</th>
+                    <th scope="col">Name</th>
+                    <th scope="col">Type</th>
+                    <th scope="col">Parent</th>
+                    <th scope="col">Attributes</th>
+                </tr>
+            </thead>
+            <tbody>
+                {% for component in (device.components|sort(true, attribute='name')) %}
+                <tr>
+                    <td>
+                        {{ component.component_uuid.uuid }}
+                    </td>
+                    <td>
+                        {{ component.name }}
+                    </td>
+                    <td>
+                        {{ component.type }}
+                    </td>
+                    <td>
+                        {{ component.parent }}
+                    </td>
+                    <td>
+                        {{ component.attributes }}
+                    </td>
+                </tr>
+                {% endfor %}
+            </tbody>
+        </table>
+    </div>
+    {% endif %}
+</div>
+
+{% endblock %}