diff --git a/deploy_component.sh b/deploy_component.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a4cf6184c83ef026562abe8e084430bba3ead9c8
--- /dev/null
+++ b/deploy_component.sh
@@ -0,0 +1,186 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the URL of your local Docker registry where the images will be uploaded to.
+# Leave it blank if you do not want to use any Docker registry.
+export TFS_REGISTRY_IMAGE=${TFS_REGISTRY_IMAGE:-""}
+#export TFS_REGISTRY_IMAGE="http://my-container-registry.local/"
+
+TFS_COMPONENTS=$1
+
+# If not already set, set the tag you want to use for your images.
+export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+# If not already set, set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
+
+# If not already set, set the neew Grafana admin password
+export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
+TMP_FOLDER="./tmp"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+
+echo "Deploying component and collecting environment variables..."
+ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
+
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Processing '$COMPONENT' component..."
+    IMAGE_NAME="$COMPONENT:$TFS_IMAGE_TAG"
+    IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+    echo "  Building Docker image..."
+    BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
+
+    if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
+        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
+    elif [ "$COMPONENT" == "pathcomp" ]; then
+        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
+        docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . >> "$BUILD_LOG"
+
+        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
+        docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+        # next command is redundant, but helpful to keep cache updated between rebuilds
+        docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG-builder" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+    else
+        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
+    fi
+
+    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
+        echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGE'..."
+
+        if [ "$COMPONENT" == "pathcomp" ]; then
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
+            docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL-frontend" > "$TAG_LOG"
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log"
+            docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL-backend" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log"
+            docker push "$IMAGE_URL-frontend" > "$PUSH_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log"
+            docker push "$IMAGE_URL-backend" > "$PUSH_LOG"
+        else
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
+            docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+        fi
+    fi
+
+    echo "  Adapting '$COMPONENT' manifest file..."
+    MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
+    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+
+    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
+        # Registry is set
+        if [ "$COMPONENT" == "pathcomp" ]; then
+            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3)
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL-frontend#g" "$MANIFEST"
+
+            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3)
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL-backend#g" "$MANIFEST"
+
+            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
+        else
+            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
+        fi
+    else
+        # Registry is not set
+        if [ "$COMPONENT" == "pathcomp" ]; then
+            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3)
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_NAME-frontend#g" "$MANIFEST"
+
+            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3)
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_NAME-backend#g" "$MANIFEST"
+
+            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
+        else
+            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
+            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
+        fi
+    fi
+
+    # TODO: harmonize names of the monitoring component
+
+    echo "  Deploying '$COMPONENT' component to Kubernetes..."
+    DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
+    kubectl --namespace $TFS_K8S_NAMESPACE delete -f "$MANIFEST" > "$DEPLOY_LOG"
+    kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
+    kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
+    kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
+
+    echo "  Collecting env-vars for '$COMPONENT' component..."
+
+    SERVICE_DATA=$(kubectl get service ${COMPONENT}service --namespace $TFS_K8S_NAMESPACE -o json)
+    if [ -z "${SERVICE_DATA}" ]; then continue; fi
+
+    # Env vars for service's host address
+    SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP')
+    if [ -z "${SERVICE_HOST}" ]; then continue; fi
+    # TODO: remove previous value from file
+    ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]')
+    echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT
+
+    # Env vars for service's 'grpc' port (if any)
+    SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port')
+    if [ -n "${SERVICE_PORT_GRPC}" ]; then
+        ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]')
+        echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT
+    fi
+
+    # Env vars for service's 'http' port (if any)
+    SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port')
+    if [ -n "${SERVICE_PORT_HTTP}" ]; then
+        ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]')
+        echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT
+    fi
+
+    printf "\n"
+done
+
+# By now, leave this control here. Some component dependencies are not well handled
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Waiting for '$COMPONENT' component..."
+    kubectl wait --namespace $TFS_K8S_NAMESPACE \
+        --for='condition=available' --timeout=300s deployment/${COMPONENT}service
+    printf "\n"
+done
+
+./show_deploy.sh
+
+echo "Done!"
diff --git a/ecoc22 b/ecoc22
new file mode 120000
index 0000000000000000000000000000000000000000..3c61895e5ac62d0b38ce058ba5ff042442542320
--- /dev/null
+++ b/ecoc22
@@ -0,0 +1 @@
+src/tests/ecoc22/
\ No newline at end of file
diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
index 171394f7c43b2447e898902c78d5276fe1bcbc7c..46c7557d9178d1bb2bc36eda13a088606f56cede 100644
--- a/manifests/deviceservice.yaml
+++ b/manifests/deviceservice.yaml
@@ -34,7 +34,7 @@ spec:
         - containerPort: 2020
         env:
         - name: LOG_LEVEL
-          value: "INFO"
+          value: "DEBUG"
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:2020"]
diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml
index 75832b94fa2a6ba97617641e7b249157508614bf..efe43fe229a7f7ba862b10a04d44c6e9de06b5fb 100644
--- a/manifests/serviceservice.yaml
+++ b/manifests/serviceservice.yaml
@@ -34,7 +34,7 @@ spec:
         - containerPort: 3030
         env:
         - name: LOG_LEVEL
-          value: "INFO"
+          value: "DEBUG"
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:3030"]
diff --git a/my_deploy.sh b/my_deploy.sh
index 67a2e0558c25d767e14b635e6dd9174433827156..e70a12e1556ab06f6daa89c316c6a6ed61c4e059 100644
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -1,13 +1,13 @@
 # Set the URL of your local Docker registry where the images will be uploaded to.
 export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
 
-# Set the list of components, separated by comas, you want to build images for, and deploy.
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
 # Supported components are:
 #   context device automation policy service compute monitoring webui
 #   interdomain slice pathcomp dlt
-#   dbscanserving opticalattackmitigator opticalcentralizedattackdetector
+#   dbscanserving opticalattackmitigator opticalattackdetector
 #   l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
-export TFS_COMPONENTS="context device automation service compute monitoring webui"
+export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
diff --git a/proto/context.proto b/proto/context.proto
index 866876175f108c056f7e35c6457a1bf48a226a9c..97d6df666fd54f3b855c704fae3c792b37639382 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -188,6 +188,7 @@ message DeviceList {
 message DeviceEvent {
   Event event = 1;
   DeviceId device_id = 2;
+  DeviceConfig device_config = 3;
 }
 
 
diff --git a/run_tests_docker.sh b/run_tests_docker.sh
new file mode 100755
index 0000000000000000000000000000000000000000..fd885140999ac0f045c162f361f0075af96a8d48
--- /dev/null
+++ b/run_tests_docker.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# Set the URL of your local Docker registry where the images will be uploaded to. Leave it blank if you do not want to
+# use any Docker registry.
+REGISTRY_IMAGE=""
+#REGISTRY_IMAGE="http://my-container-registry.local/"
+
+# Set the list of components you want to build images for, and deploy.
+COMPONENTS="context device automation policy service compute monitoring centralizedattackdetector"
+
+# Set the tag you want to use for your images.
+IMAGE_TAG="tf-dev"
+
+# Constants
+TMP_FOLDER="./tmp"
+
+TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+mkdir -p $TMP_LOGS_FOLDER
+
+for COMPONENT in $COMPONENTS; do
+    echo "Processing '$COMPONENT' component..."
+    IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
+    IMAGE_URL="$REGISTRY_IMAGE/$IMAGE_NAME"
+
+    echo "  Building Docker image..."
+    BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
+
+    if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
+        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
+    else 
+        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/ > "$BUILD_LOG"
+    fi
+
+    if [ -n "$REGISTRY_IMAGE" ]; then
+        echo "Pushing Docker image to '$REGISTRY_IMAGE'..."
+
+        TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
+        docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG"
+
+        PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
+        docker push "$IMAGE_URL" > "$PUSH_LOG"
+    fi
+done
+
+echo "Preparing for running the tests..."
+
+if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi  
+
+for COMPONENT in $COMPONENTS; do
+    IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
+    echo "  Running tests for $COMPONENT:"
+    docker run -it -d --name $COMPONENT $IMAGE_NAME --network=teraflowbridge
+    docker exec -it $COMPONENT bash -c "pytest --log-level=DEBUG --verbose $COMPONENT/tests/test_unitary.py"
+    docker stop $COMPONENT
+done
diff --git a/scripts/build_run_report_tests_locally.sh b/scripts/build_run_report_tests_locally.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9bdc81d9894df35a6bcc325d78e7f1f5214e8a96
--- /dev/null
+++ b/scripts/build_run_report_tests_locally.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+die () {
+    echo >&2 "$@"
+    exit 1
+}
+
+[ "$#" -eq 1 ] || die "component name required but not provided"
+
+COMPONENT_NAME=$1 # parameter
+IMAGE_NAME="${COMPONENT_NAME}-local"
+IMAGE_TAG="latest"
+
+if docker ps | grep $IMAGE_NAME
+then
+    docker stop $IMAGE_NAME
+fi
+
+if docker network list | grep teraflowbridge
+then
+    echo "teraflowbridge is already created"
+else
+    docker network create -d bridge teraflowbridge
+fi
+
+docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$COMPONENT_NAME/Dockerfile .
+
+docker run --name $IMAGE_NAME -d -v "${PWD}/src/${COMPONENT_NAME}/tests:/home/${COMPONENT_NAME}/results" --network=teraflowbridge --rm $IMAGE_NAME:$IMAGE_TAG
+
+docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $COMPONENT_NAME/tests/ --junitxml=/home/${COMPONENT_NAME}/results/${COMPONENT_NAME}_report.xml"
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+echo
+echo "Coverage report:"
+echo "----------------"
+docker exec -i $IMAGE_NAME bash -c "coverage report --include='${COMPONENT_NAME}/*' --show-missing"
+
+# docker stop $IMAGE_NAME
+docker rm -f $IMAGE_NAME
+docker network rm teraflowbridge
diff --git a/scripts/dump_logs.sh b/scripts/dump_logs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a6db945d245b832564353de71610bf720eb0acb8
--- /dev/null
+++ b/scripts/dump_logs.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+mkdir -p tmp/exec_logs/$TFS_K8S_NAMESPACE/
+rm tmp/exec_logs/$TFS_K8S_NAMESPACE/*
+
+PODS=$(kubectl get pods --namespace $TFS_K8S_NAMESPACE --no-headers --output=custom-columns=":metadata.name")
+for POD in $PODS; do
+    CONTAINERS=$(kubectl get pods --namespace $TFS_K8S_NAMESPACE $POD -o jsonpath='{.spec.containers[*].name}')
+    for CONTAINER in $CONTAINERS; do
+        kubectl --namespace $TFS_K8S_NAMESPACE logs pod/${POD} --container ${CONTAINER} \
+            > tmp/exec_logs/$TFS_K8S_NAMESPACE/$POD\_\_$CONTAINER.log
+    done
+done
diff --git a/scripts/run_tests_locally-service.sh b/scripts/run_tests_locally-service.sh
index 8a2a8d0be1d1960c6197a67e471ae29abba501a7..8816b9faa24e55e486a54852632fdb8e00db1d04 100755
--- a/scripts/run_tests_locally-service.sh
+++ b/scripts/run_tests_locally-service.sh
@@ -21,4 +21,5 @@ RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 # Run unitary tests and analyze coverage of code at same time
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    service/tests/test_unitary_task_scheduler.py \
     service/tests/test_unitary.py
diff --git a/scripts/show_logs_monitoring.sh b/scripts/show_logs_monitoring.sh
index 5978035127735c20ddc6387666a5434cbac61ff8..4bafc6daaa1088cb6ab2b401ae3ce4927afacf46 100755
--- a/scripts/show_logs_monitoring.sh
+++ b/scripts/show_logs_monitoring.sh
@@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
 # Automated steps start here
 ########################################################################################################################
 
-kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice -c server
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringserver
diff --git a/scripts/show_logs_slice.sh b/scripts/show_logs_slice.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c7bc0b69588307092b22ea3c600669359f04de99
--- /dev/null
+++ b/scripts/show_logs_slice.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/sliceservice
diff --git a/src/automation/README.md b/src/automation/README.md
index 099980bcc4172bf9e5c2d59459f40ae4331696cf..e98d2b8ab62563f43cf2c1011e91fb2a1d08d378 100644
--- a/src/automation/README.md
+++ b/src/automation/README.md
@@ -1,28 +1,57 @@
-# Automation TeraFlow OS service 
+# TeraFlowSDN Automation service
 
-The Automation service, also known as Zero-Touch Provisioning (ZTP), is tested on Ubuntu 20.04. Follow the instructions below to build, test, and run this service on your local environment.
+This repository hosts the TeraFlowSDN Automation service, also known as Zero-Touch Provisioning (ZTP) service.
+Follow the instructions below to build, test, and run this service on your local environment.
 
-## Automation Teraflow OS service architecture
+## TeraFlowSDN Automation service architecture
 
-| The Automation Teraflow OS service architecture consists of six (6) interfaces listed below:                                                                                                                                                 | 
+The TeraFlowSDN Automation architecture consists of six (6) interfaces listed below:
+
+Interfaces |
 |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| 1. The `AutomationGateway` interface that implements all the rpc functions that are described in `automation.proto` file.                                                                                                                    | 
-| 2. The `ContextGateway` interface that communicates with a `Context` Service gRPC client and implements all the rpc functions that are described in `context.proto` file.                                                                    |
-| 3. The `DeviceGateway` interface that communicates with a `Device` Service gRPC client and implements all the rpc functions that are described in `device.proto` file.                                                                       |
-| 4. The `AutomationService` interface that implements the `addDevice()` method by communicating with a `Context` gRPC client & a `Device` gRPC client through the use of `ContextService` interface & `DeviceService` interface respectively. |
-| 5. The `ContextService` interface that implements the `getDevice()` & `getDeviceEvents()` methods by communicating with a `Context` gRPC client through the use of `ContextGateway` interface.                                               |
-| 6. The `DeviceService` interface that implements the `getInitialConfiguration()` & `configureDevice()` methods by communicating with a `Device` gRPC client through the use of `DeviceGateway` interface.                                    |
+| 1. The `AutomationGateway` interface that implements all the RPC functions that are described in `automation.proto` file. |
+| 2. The `ContextGateway` interface that communicates with a `Context` Service gRPC client to invoke key RPC functions described in `context.proto` file. |
+| 3. The `DeviceGateway` interface that communicates with a `Device` Service gRPC client to invoke key RPC functions described in `device.proto` file. |
+| 4. The `AutomationService` interface that implements the `addDevice()`, `updateDevice()`, and `deleteDevice()` methods by communicating with a `Context` gRPC client and a `Device` gRPC client through the use of `ContextService` interface and `DeviceService` interface respectively. |
+| 5. The `ContextService` interface that implements the `getDevice()` and `getDeviceEvents()` methods by communicating with a `Context` gRPC client through the use of `ContextGateway` interface. |
+| 6. The `DeviceService` interface that implements the `getInitialConfiguration()`, `configureDevice()`, and `deleteDevice()` methods by communicating with a `Device` gRPC client through the use of `DeviceGateway` interface. |
+
+
+## Prerequisites
 
+The Automation service is currently tested against Ubuntu 20.04 and Java 11.
 
-## Run with dev profile
+To quickly install Java 11 on a Debian-based Linux distro do:
 
 ```bash
-./mvnw clean quarkus:dev
+sudo apt-get install openjdk-11-jdk -y
 ```
 
-## Running tests
+Feel free to try more recent Java versions.
+
+## Compile
+
+```bash
+./mvnw compile
+```
+
+## Run tests
+
+```bash
+./mvnw test
+```
 
-Run unit and functional tests `./mvnw clean test`
+## Run service
+
+```bash
+./mvnw quarkus:dev
+````
+
+## Clean
+
+```bash
+./mvnw clean
+```
 
 ## Deploying on a Kubernetes cluster
 
@@ -30,10 +59,16 @@ To create the K8s manifest file under `target/kubernetes/kubernetes.yml` to be u
 
 ```bash
 ./mvnw clean package -DskipUTs -DskipITs
-``` 
+```
 
 To deploy the application in a K8s cluster run
 
 ```bash
 kubectl apply -f "manifests/automationservice.yaml"
 ```
+
+## Maintainers
+
+This TeraFlowSDN service is implemented by [UBITECH](https://www.ubitech.eu).
+
+Feel free to contact Georgios Katsikas (gkatsikas at ubitech dot eu) in case you have questions.
diff --git a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java
index 2b163fdff1a29c26f98380a0c3b19666a86749fe..a281e221fb3098a76bae737b60e82d65c142d4e6 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java
@@ -217,6 +217,7 @@ public class Serializer {
 
         builder.setDeviceId(deviceId);
         builder.setEvent(serialize(deviceEvent.getEvent()));
+        builder.setDeviceConfig(serialize(deviceEvent.getDeviceConfig().orElse(null)));
 
         return builder.build();
     }
@@ -224,8 +225,9 @@ public class Serializer {
     public DeviceEvent deserialize(ContextOuterClass.DeviceEvent deviceEvent) {
         final var deviceId = deserialize(deviceEvent.getDeviceId());
         final var event = deserialize(deviceEvent.getEvent());
+        final var deviceConfig = deserialize(deviceEvent.getDeviceConfig());
 
-        return new DeviceEvent(deviceId, event);
+        return new DeviceEvent(deviceId, event, deviceConfig);
     }
 
     public ContextOuterClass.ConfigActionEnum serialize(ConfigActionEnum configAction) {
diff --git a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java
index efc0be8308fb9a75132cd604a84fd5b4822f3af7..526b9b7b2ba34edc6d538619bdb190a9aefa9d97 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java
@@ -16,14 +16,23 @@
 
 package eu.teraflow.automation.context.model;
 
+import java.util.Optional;
+
 public class DeviceEvent {
 
     private final Event event;
     private final String deviceId;
+    private final Optional<DeviceConfig> deviceConfig;
 
     public DeviceEvent(String deviceId, Event event) {
+        this(deviceId, event, null);
+    }
+
+    public DeviceEvent(String deviceId, Event event, DeviceConfig deviceConfig) {
         this.event = event;
         this.deviceId = deviceId;
+        this.deviceConfig =
+                (deviceConfig == null) ? Optional.empty() : Optional.ofNullable(deviceConfig);
     }
 
     public Event getEvent() {
@@ -34,8 +43,14 @@ public class DeviceEvent {
         return deviceId;
     }
 
+    public Optional<DeviceConfig> getDeviceConfig() {
+        return deviceConfig;
+    }
+
     @Override
     public String toString() {
-        return String.format("%s[%s, %s]", getClass().getSimpleName(), deviceId, event.toString());
+        return String.format(
+                "%s[%s, %s, %s]",
+                getClass().getSimpleName(), deviceId, event.toString(), deviceConfig.orElse(null));
     }
 }
diff --git a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java
index 74f8f301ca7d4db904d9092e8f860fc4dc171a51..63f0eb45fdf0c287b68300db84ef1ef7f88418ba 100644
--- a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java
+++ b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java
@@ -308,14 +308,51 @@ class SerializerTest {
                         .setTimestamp(expectedTimestamp)
                         .setEventType(ContextOuterClass.EventTypeEnum.EVENTTYPE_CREATE)
                         .build();
+
+        final var expectedConfigRuleCustomA =
+                ContextOuterClass.ConfigRule_Custom.newBuilder()
+                        .setResourceKey("resourceKeyA")
+                        .setResourceValue("resourceValueA")
+                        .build();
+
+        final var expectedConfigRuleCustomB =
+                ContextOuterClass.ConfigRule_Custom.newBuilder()
+                        .setResourceKey("resourceKeyB")
+                        .setResourceValue("resourceValueB")
+                        .build();
+
+        final var expectedConfigRuleA =
+                ContextOuterClass.ConfigRule.newBuilder()
+                        .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_SET)
+                        .setCustom(expectedConfigRuleCustomA)
+                        .build();
+        final var expectedConfigRuleB =
+                ContextOuterClass.ConfigRule.newBuilder()
+                        .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_DELETE)
+                        .setCustom(expectedConfigRuleCustomB)
+                        .build();
+
+        final var expectedDeviceConfig =
+                ContextOuterClass.DeviceConfig.newBuilder()
+                        .addAllConfigRules(List.of(expectedConfigRuleA, expectedConfigRuleB))
+                        .build();
+
         final var expectedDeviceEvent =
                 ContextOuterClass.DeviceEvent.newBuilder()
                         .setDeviceId(expectedDeviceId)
                         .setEvent(expectedEvent)
+                        .setDeviceConfig(expectedDeviceConfig)
                         .build();
 
         final var creationEvent = new Event(1, EventTypeEnum.CREATE);
-        final var deviceEvent = new DeviceEvent("deviceId", creationEvent);
+        final var configRuleCustomA = new ConfigRuleCustom("resourceKeyA", "resourceValueA");
+        final var configRuleCustomB = new ConfigRuleCustom("resourceKeyB", "resourceValueB");
+        final var configRuleTypeA = new ConfigRuleTypeCustom(configRuleCustomA);
+        final var configRuleTypeB = new ConfigRuleTypeCustom(configRuleCustomB);
+        final var configRuleA = new ConfigRule(ConfigActionEnum.SET, configRuleTypeA);
+        final var configRuleB = new ConfigRule(ConfigActionEnum.DELETE, configRuleTypeB);
+        final var deviceConfig = new DeviceConfig(List.of(configRuleA, configRuleB));
+        final var deviceEvent = new DeviceEvent("deviceId", creationEvent, deviceConfig);
         final var serializedDeviceEvent = serializer.serialize(deviceEvent);
 
         assertThat(serializedDeviceEvent).usingRecursiveComparison().isEqualTo(expectedDeviceEvent);
@@ -328,7 +365,22 @@ class SerializerTest {
         final var expectedTimestamp = ContextOuterClass.Timestamp.newBuilder().setTimestamp(1).build();
 
         final var creationEvent = new Event(1, expectedEventType);
-        final var expectedDeviceEvent = new DeviceEvent(dummyDeviceId, creationEvent);
+
+        final var expectedConfigRuleCustomA = new ConfigRuleCustom("resourceKeyA", "resourceValueA");
+        final var expectedConfigRuleCustomB = new ConfigRuleCustom("resourceKeyB", "resourceValueB");
+
+        final var expectedConfigRuleTypeA = new ConfigRuleTypeCustom(expectedConfigRuleCustomA);
+        final var expectedConfigRuleTypeB = new ConfigRuleTypeCustom(expectedConfigRuleCustomB);
+
+        final var expectedConfigRuleA = new ConfigRule(ConfigActionEnum.SET, expectedConfigRuleTypeA);
+        final var expectedConfigRuleB =
+                new ConfigRule(ConfigActionEnum.DELETE, expectedConfigRuleTypeB);
+
+        final var expectedDeviceConfig =
+                new DeviceConfig(List.of(expectedConfigRuleA, expectedConfigRuleB));
+
+        final var expectedDeviceEvent =
+                new DeviceEvent(dummyDeviceId, creationEvent, expectedDeviceConfig);
 
         final var deviceUuid = Uuid.newBuilder().setUuid("deviceId");
         final var deviceId = DeviceId.newBuilder().setDeviceUuid(deviceUuid).build();
@@ -337,8 +389,38 @@ class SerializerTest {
                         .setTimestamp(expectedTimestamp)
                         .setEventType(ContextOuterClass.EventTypeEnum.EVENTTYPE_REMOVE)
                         .build();
+
+        final var configRuleCustomA =
+                ContextOuterClass.ConfigRule_Custom.newBuilder()
+                        .setResourceKey("resourceKeyA")
+                        .setResourceValue("resourceValueA")
+                        .build();
+        final var configRuleCustomB =
+                ContextOuterClass.ConfigRule_Custom.newBuilder()
+                        .setResourceKey("resourceKeyB")
+                        .setResourceValue("resourceValueB")
+                        .build();
+        final var configRuleA =
+                ContextOuterClass.ConfigRule.newBuilder()
+                        .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_SET)
+                        .setCustom(configRuleCustomA)
+                        .build();
+        final var configRuleB =
+                ContextOuterClass.ConfigRule.newBuilder()
+                        .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_DELETE)
+                        .setCustom(configRuleCustomB)
+                        .build();
+        final var deviceConfig =
+                ContextOuterClass.DeviceConfig.newBuilder()
+                        .addAllConfigRules(List.of(configRuleA, configRuleB))
+                        .build();
+
         final var serializedDeviceEvent =
-                ContextOuterClass.DeviceEvent.newBuilder().setDeviceId(deviceId).setEvent(event).build();
+                ContextOuterClass.DeviceEvent.newBuilder()
+                        .setDeviceId(deviceId)
+                        .setEvent(event)
+                        .setDeviceConfig(deviceConfig)
+                        .build();
         final var deviceEvent = serializer.deserialize(serializedDeviceEvent);
 
         assertThat(deviceEvent).usingRecursiveComparison().isEqualTo(expectedDeviceEvent);
diff --git a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java
index 45a64fabb43bab645e97e9d80bc1825242006dce..3c0d7ce36fcdc4e47697ba11a4ceb3d8e8cdea0c 100644
--- a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java
+++ b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java
@@ -17331,6 +17331,21 @@ public final class ContextOuterClass {
      * <code>.context.DeviceId device_id = 2;</code>
      */
     context.ContextOuterClass.DeviceIdOrBuilder getDeviceIdOrBuilder();
+
+    /**
+     * <code>.context.DeviceConfig device_config = 3;</code>
+     * @return Whether the deviceConfig field is set.
+     */
+    boolean hasDeviceConfig();
+    /**
+     * <code>.context.DeviceConfig device_config = 3;</code>
+     * @return The deviceConfig.
+     */
+    context.ContextOuterClass.DeviceConfig getDeviceConfig();
+    /**
+     * <code>.context.DeviceConfig device_config = 3;</code>
+     */
+    context.ContextOuterClass.DeviceConfigOrBuilder getDeviceConfigOrBuilder();
   }
   /**
    * Protobuf type {@code context.DeviceEvent}
@@ -17403,6 +17418,19 @@ public final class ContextOuterClass {
 
               break;
             }
+            case 26: {
+              context.ContextOuterClass.DeviceConfig.Builder subBuilder = null;
+              if (deviceConfig_ != null) {
+                subBuilder = deviceConfig_.toBuilder();
+              }
+              deviceConfig_ = input.readMessage(context.ContextOuterClass.DeviceConfig.parser(), extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(deviceConfig_);
+                deviceConfig_ = subBuilder.buildPartial();
+              }
+
+              break;
+            }
             default: {
               if (!parseUnknownField(
                   input, unknownFields, extensionRegistry, tag)) {
@@ -17487,6 +17515,32 @@ public final class ContextOuterClass {
       return getDeviceId();
     }
 
+    public static final int DEVICE_CONFIG_FIELD_NUMBER = 3;
+    private context.ContextOuterClass.DeviceConfig deviceConfig_;
+    /**
+     * <code>.context.DeviceConfig device_config = 3;</code>
+     * @return Whether the deviceConfig field is set.
+     */
+    @java.lang.Override
+    public boolean hasDeviceConfig() {
+      return deviceConfig_ != null;
+    }
+    /**
+     * <code>.context.DeviceConfig device_config = 3;</code>
+     * @return The deviceConfig.
+     */
+    @java.lang.Override
+    public context.ContextOuterClass.DeviceConfig getDeviceConfig() {
+      return deviceConfig_ == null ? context.ContextOuterClass.DeviceConfig.getDefaultInstance() : deviceConfig_;
+    }
+    /**
+     * <code>.context.DeviceConfig device_config = 3;</code>
+     */
+    @java.lang.Override
+    public context.ContextOuterClass.DeviceConfigOrBuilder getDeviceConfigOrBuilder() {
+      return getDeviceConfig();
+    }
+
     private byte memoizedIsInitialized = -1;
     @java.lang.Override
     public final boolean isInitialized() {
@@ -17507,6 +17561,9 @@ public final class ContextOuterClass {
       if (deviceId_ != null) {
         output.writeMessage(2, getDeviceId());
       }
+      if (deviceConfig_ != null) {
+        output.writeMessage(3, getDeviceConfig());
+      }
       unknownFields.writeTo(output);
     }
 
@@ -17524,6 +17581,10 @@ public final class ContextOuterClass {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(2, getDeviceId());
       }
+      if (deviceConfig_ != null) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(3, getDeviceConfig());
+      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -17549,6 +17610,11 @@ public final class ContextOuterClass {
         if (!getDeviceId()
             .equals(other.getDeviceId())) return false;
       }
+      if (hasDeviceConfig() != other.hasDeviceConfig()) return false;
+      if (hasDeviceConfig()) {
+        if (!getDeviceConfig()
+            .equals(other.getDeviceConfig())) return false;
+      }
       if (!unknownFields.equals(other.unknownFields)) return false;
       return true;
     }
@@ -17568,6 +17634,10 @@ public final class ContextOuterClass {
         hash = (37 * hash) + DEVICE_ID_FIELD_NUMBER;
         hash = (53 * hash) + getDeviceId().hashCode();
       }
+      if (hasDeviceConfig()) {
+        hash = (37 * hash) + DEVICE_CONFIG_FIELD_NUMBER;
+        hash = (53 * hash) + getDeviceConfig().hashCode();
+      }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -17713,6 +17783,12 @@ public final class ContextOuterClass {
           deviceId_ = null;
           deviceIdBuilder_ = null;
         }
+        if (deviceConfigBuilder_ == null) {
+          deviceConfig_ = null;
+        } else {
+          deviceConfig_ = null;
+          deviceConfigBuilder_ = null;
+        }
         return this;
       }
 
@@ -17749,6 +17825,11 @@ public final class ContextOuterClass {
         } else {
           result.deviceId_ = deviceIdBuilder_.build();
         }
+        if (deviceConfigBuilder_ == null) {
+          result.deviceConfig_ = deviceConfig_;
+        } else {
+          result.deviceConfig_ = deviceConfigBuilder_.build();
+        }
         onBuilt();
         return result;
       }
@@ -17803,6 +17884,9 @@ public final class ContextOuterClass {
         if (other.hasDeviceId()) {
           mergeDeviceId(other.getDeviceId());
         }
+        if (other.hasDeviceConfig()) {
+          mergeDeviceConfig(other.getDeviceConfig());
+        }
         this.mergeUnknownFields(other.unknownFields);
         onChanged();
         return this;
@@ -18069,6 +18153,125 @@ public final class ContextOuterClass {
         }
         return deviceIdBuilder_;
       }
+
+      private context.ContextOuterClass.DeviceConfig deviceConfig_;
+      private com.google.protobuf.SingleFieldBuilderV3<
+          context.ContextOuterClass.DeviceConfig, context.ContextOuterClass.DeviceConfig.Builder, context.ContextOuterClass.DeviceConfigOrBuilder> deviceConfigBuilder_;
+      /**
+       * <code>.context.DeviceConfig device_config = 3;</code>
+       * @return Whether the deviceConfig field is set.
+       */
+      public boolean hasDeviceConfig() {
+        return deviceConfigBuilder_ != null || deviceConfig_ != null;
+      }
+      /**
+       * <code>.context.DeviceConfig device_config = 3;</code>
+       * @return The deviceConfig.
+       */
+      public context.ContextOuterClass.DeviceConfig getDeviceConfig() {
+        if (deviceConfigBuilder_ == null) {
+          return deviceConfig_ == null ? context.ContextOuterClass.DeviceConfig.getDefaultInstance() : deviceConfig_;
+        } else {
+          return deviceConfigBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>.context.DeviceConfig device_config = 3;</code>
+       */
+      public Builder setDeviceConfig(context.ContextOuterClass.DeviceConfig value) {
+        if (deviceConfigBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          deviceConfig_ = value;
+          onChanged();
+        } else {
+          deviceConfigBuilder_.setMessage(value);
+        }
+
+        return this;
+      }
+      /**
+       * <code>.context.DeviceConfig device_config = 3;</code>
+       */
+      public Builder setDeviceConfig(
+          context.ContextOuterClass.DeviceConfig.Builder builderForValue) {
+        if (deviceConfigBuilder_ == null) {
+          deviceConfig_ = builderForValue.build();
+          onChanged();
+        } else {
+          deviceConfigBuilder_.setMessage(builderForValue.build());
+        }
+
+        return this;
+      }
+      /**
+       * <code>.context.DeviceConfig device_config = 3;</code>
+       */
+      public Builder mergeDeviceConfig(context.ContextOuterClass.DeviceConfig value) {
+        if (deviceConfigBuilder_ == null) {
+          if (deviceConfig_ != null) {
+            deviceConfig_ =
+              context.ContextOuterClass.DeviceConfig.newBuilder(deviceConfig_).mergeFrom(value).buildPartial();
+          } else {
+            deviceConfig_ = value;
+          }
+          onChanged();
+        } else {
+          deviceConfigBuilder_.mergeFrom(value);
+        }
+
+        return this;
+      }
+      /**
+       * <code>.context.DeviceConfig device_config = 3;</code>
+       */
+      public Builder clearDeviceConfig() {
+        if (deviceConfigBuilder_ == null) {
+          deviceConfig_ = null;
+          onChanged();
+        } else {
+          deviceConfig_ = null;
+          deviceConfigBuilder_ = null;
+        }
+
+        return this;
+      }
+      /**
+       * <code>.context.DeviceConfig device_config = 3;</code>
+       */
+      public context.ContextOuterClass.DeviceConfig.Builder getDeviceConfigBuilder() {
+        
+        onChanged();
+        return getDeviceConfigFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>.context.DeviceConfig device_config = 3;</code>
+       */
+      public context.ContextOuterClass.DeviceConfigOrBuilder getDeviceConfigOrBuilder() {
+        if (deviceConfigBuilder_ != null) {
+          return deviceConfigBuilder_.getMessageOrBuilder();
+        } else {
+          return deviceConfig_ == null ?
+              context.ContextOuterClass.DeviceConfig.getDefaultInstance() : deviceConfig_;
+        }
+      }
+      /**
+       * <code>.context.DeviceConfig device_config = 3;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilderV3<
+          context.ContextOuterClass.DeviceConfig, context.ContextOuterClass.DeviceConfig.Builder, context.ContextOuterClass.DeviceConfigOrBuilder> 
+          getDeviceConfigFieldBuilder() {
+        if (deviceConfigBuilder_ == null) {
+          deviceConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+              context.ContextOuterClass.DeviceConfig, context.ContextOuterClass.DeviceConfig.Builder, context.ContextOuterClass.DeviceConfigOrBuilder>(
+                  getDeviceConfig(),
+                  getParentForChildren(),
+                  isClean());
+          deviceConfig_ = null;
+        }
+        return deviceConfigBuilder_;
+      }
       @java.lang.Override
       public final Builder setUnknownFields(
           final com.google.protobuf.UnknownFieldSet unknownFields) {
@@ -61981,230 +62184,234 @@ public final class ContextOuterClass {
       "(\0132\023.context.ConfigRule\"5\n\014DeviceIdList\022" +
       "%\n\ndevice_ids\030\001 \003(\0132\021.context.DeviceId\"." +
       "\n\nDeviceList\022 \n\007devices\030\001 \003(\0132\017.context." +
-      "Device\"R\n\013DeviceEvent\022\035\n\005event\030\001 \001(\0132\016.c" +
-      "ontext.Event\022$\n\tdevice_id\030\002 \001(\0132\021.contex" +
-      "t.DeviceId\"*\n\006LinkId\022 \n\tlink_uuid\030\001 \001(\0132" +
-      "\r.context.Uuid\"X\n\004Link\022 \n\007link_id\030\001 \001(\0132" +
-      "\017.context.LinkId\022.\n\021link_endpoint_ids\030\002 " +
-      "\003(\0132\023.context.EndPointId\"/\n\nLinkIdList\022!" +
-      "\n\010link_ids\030\001 \003(\0132\017.context.LinkId\"(\n\010Lin" +
-      "kList\022\034\n\005links\030\001 \003(\0132\r.context.Link\"L\n\tL" +
-      "inkEvent\022\035\n\005event\030\001 \001(\0132\016.context.Event\022" +
-      " \n\007link_id\030\002 \001(\0132\017.context.LinkId\"X\n\tSer" +
-      "viceId\022&\n\ncontext_id\030\001 \001(\0132\022.context.Con" +
-      "textId\022#\n\014service_uuid\030\002 \001(\0132\r.context.U" +
-      "uid\"\315\002\n\007Service\022&\n\nservice_id\030\001 \001(\0132\022.co" +
-      "ntext.ServiceId\022.\n\014service_type\030\002 \001(\0162\030." +
-      "context.ServiceTypeEnum\0221\n\024service_endpo" +
-      "int_ids\030\003 \003(\0132\023.context.EndPointId\0220\n\023se" +
-      "rvice_constraints\030\004 \003(\0132\023.context.Constr" +
-      "aint\022.\n\016service_status\030\005 \001(\0132\026.context.S" +
-      "erviceStatus\022.\n\016service_config\030\006 \001(\0132\026.c" +
-      "ontext.ServiceConfig\022%\n\ttimestamp\030\007 \001(\0132" +
-      "\022.context.Timestamp\"C\n\rServiceStatus\0222\n\016" +
-      "service_status\030\001 \001(\0162\032.context.ServiceSt" +
-      "atusEnum\":\n\rServiceConfig\022)\n\014config_rule" +
-      "s\030\001 \003(\0132\023.context.ConfigRule\"8\n\rServiceI" +
-      "dList\022\'\n\013service_ids\030\001 \003(\0132\022.context.Ser" +
-      "viceId\"1\n\013ServiceList\022\"\n\010services\030\001 \003(\0132" +
-      "\020.context.Service\"U\n\014ServiceEvent\022\035\n\005eve" +
-      "nt\030\001 \001(\0132\016.context.Event\022&\n\nservice_id\030\002" +
-      " \001(\0132\022.context.ServiceId\"T\n\007SliceId\022&\n\nc" +
-      "ontext_id\030\001 \001(\0132\022.context.ContextId\022!\n\ns" +
-      "lice_uuid\030\002 \001(\0132\r.context.Uuid\"\222\003\n\005Slice" +
-      "\022\"\n\010slice_id\030\001 \001(\0132\020.context.SliceId\022/\n\022" +
-      "slice_endpoint_ids\030\002 \003(\0132\023.context.EndPo" +
-      "intId\022.\n\021slice_constraints\030\003 \003(\0132\023.conte" +
-      "xt.Constraint\022-\n\021slice_service_ids\030\004 \003(\013" +
-      "2\022.context.ServiceId\022,\n\022slice_subslice_i" +
-      "ds\030\005 \003(\0132\020.context.SliceId\022*\n\014slice_stat" +
-      "us\030\006 \001(\0132\024.context.SliceStatus\022*\n\014slice_" +
-      "config\030\007 \001(\0132\024.context.SliceConfig\022(\n\013sl" +
-      "ice_owner\030\010 \001(\0132\023.context.SliceOwner\022%\n\t" +
-      "timestamp\030\t \001(\0132\022.context.Timestamp\"E\n\nS" +
-      "liceOwner\022!\n\nowner_uuid\030\001 \001(\0132\r.context." +
-      "Uuid\022\024\n\014owner_string\030\002 \001(\t\"=\n\013SliceStatu" +
-      "s\022.\n\014slice_status\030\001 \001(\0162\030.context.SliceS" +
-      "tatusEnum\"8\n\013SliceConfig\022)\n\014config_rules" +
-      "\030\001 \003(\0132\023.context.ConfigRule\"2\n\013SliceIdLi" +
-      "st\022#\n\tslice_ids\030\001 \003(\0132\020.context.SliceId\"" +
-      "+\n\tSliceList\022\036\n\006slices\030\001 \003(\0132\016.context.S" +
-      "lice\"O\n\nSliceEvent\022\035\n\005event\030\001 \001(\0132\016.cont" +
-      "ext.Event\022\"\n\010slice_id\030\002 \001(\0132\020.context.Sl" +
-      "iceId\"6\n\014ConnectionId\022&\n\017connection_uuid" +
-      "\030\001 \001(\0132\r.context.Uuid\"2\n\025ConnectionSetti" +
-      "ngs_L0\022\031\n\021lsp_symbolic_name\030\001 \001(\t\"\236\001\n\025Co" +
-      "nnectionSettings_L2\022\027\n\017src_mac_address\030\001" +
-      " \001(\t\022\027\n\017dst_mac_address\030\002 \001(\t\022\022\n\nether_t" +
-      "ype\030\003 \001(\r\022\017\n\007vlan_id\030\004 \001(\r\022\022\n\nmpls_label" +
-      "\030\005 \001(\r\022\032\n\022mpls_traffic_class\030\006 \001(\r\"t\n\025Co" +
-      "nnectionSettings_L3\022\026\n\016src_ip_address\030\001 " +
-      "\001(\t\022\026\n\016dst_ip_address\030\002 \001(\t\022\014\n\004dscp\030\003 \001(" +
-      "\r\022\020\n\010protocol\030\004 \001(\r\022\013\n\003ttl\030\005 \001(\r\"[\n\025Conn" +
-      "ectionSettings_L4\022\020\n\010src_port\030\001 \001(\r\022\020\n\010d" +
-      "st_port\030\002 \001(\r\022\021\n\ttcp_flags\030\003 \001(\r\022\013\n\003ttl\030" +
-      "\004 \001(\r\"\304\001\n\022ConnectionSettings\022*\n\002l0\030\001 \001(\013" +
-      "2\036.context.ConnectionSettings_L0\022*\n\002l2\030\002" +
-      " \001(\0132\036.context.ConnectionSettings_L2\022*\n\002" +
-      "l3\030\003 \001(\0132\036.context.ConnectionSettings_L3" +
-      "\022*\n\002l4\030\004 \001(\0132\036.context.ConnectionSetting" +
-      "s_L4\"\363\001\n\nConnection\022,\n\rconnection_id\030\001 \001" +
-      "(\0132\025.context.ConnectionId\022&\n\nservice_id\030" +
-      "\002 \001(\0132\022.context.ServiceId\0223\n\026path_hops_e" +
-      "ndpoint_ids\030\003 \003(\0132\023.context.EndPointId\022+" +
-      "\n\017sub_service_ids\030\004 \003(\0132\022.context.Servic" +
-      "eId\022-\n\010settings\030\005 \001(\0132\033.context.Connecti" +
-      "onSettings\"A\n\020ConnectionIdList\022-\n\016connec" +
-      "tion_ids\030\001 \003(\0132\025.context.ConnectionId\":\n" +
-      "\016ConnectionList\022(\n\013connections\030\001 \003(\0132\023.c" +
-      "ontext.Connection\"^\n\017ConnectionEvent\022\035\n\005" +
-      "event\030\001 \001(\0132\016.context.Event\022,\n\rconnectio" +
-      "n_id\030\002 \001(\0132\025.context.ConnectionId\"\202\001\n\nEn" +
-      "dPointId\022(\n\013topology_id\030\001 \001(\0132\023.context." +
-      "TopologyId\022$\n\tdevice_id\030\002 \001(\0132\021.context." +
-      "DeviceId\022$\n\rendpoint_uuid\030\003 \001(\0132\r.contex" +
-      "t.Uuid\"\264\001\n\010EndPoint\022(\n\013endpoint_id\030\001 \001(\013" +
-      "2\023.context.EndPointId\022\025\n\rendpoint_type\030\002" +
-      " \001(\t\0229\n\020kpi_sample_types\030\003 \003(\0162\037.kpi_sam" +
-      "ple_types.KpiSampleType\022,\n\021endpoint_loca" +
-      "tion\030\004 \001(\0132\021.context.Location\"A\n\021ConfigR" +
-      "ule_Custom\022\024\n\014resource_key\030\001 \001(\t\022\026\n\016reso" +
-      "urce_value\030\002 \001(\t\"]\n\016ConfigRule_ACL\022(\n\013en" +
-      "dpoint_id\030\001 \001(\0132\023.context.EndPointId\022!\n\010" +
-      "rule_set\030\002 \001(\0132\017.acl.AclRuleSet\"\234\001\n\nConf" +
-      "igRule\022)\n\006action\030\001 \001(\0162\031.context.ConfigA" +
-      "ctionEnum\022,\n\006custom\030\002 \001(\0132\032.context.Conf" +
-      "igRule_CustomH\000\022&\n\003acl\030\003 \001(\0132\027.context.C" +
-      "onfigRule_ACLH\000B\r\n\013config_rule\"F\n\021Constr" +
-      "aint_Custom\022\027\n\017constraint_type\030\001 \001(\t\022\030\n\020" +
-      "constraint_value\030\002 \001(\t\"E\n\023Constraint_Sch" +
-      "edule\022\027\n\017start_timestamp\030\001 \001(\002\022\025\n\rdurati" +
-      "on_days\030\002 \001(\002\"3\n\014GPS_Position\022\020\n\010latitud" +
-      "e\030\001 \001(\002\022\021\n\tlongitude\030\002 \001(\002\"W\n\010Location\022\020" +
-      "\n\006region\030\001 \001(\tH\000\022-\n\014gps_position\030\002 \001(\0132\025" +
-      ".context.GPS_PositionH\000B\n\n\010location\"l\n\033C" +
-      "onstraint_EndPointLocation\022(\n\013endpoint_i" +
-      "d\030\001 \001(\0132\023.context.EndPointId\022#\n\010location" +
-      "\030\002 \001(\0132\021.context.Location\"Y\n\033Constraint_" +
-      "EndPointPriority\022(\n\013endpoint_id\030\001 \001(\0132\023." +
-      "context.EndPointId\022\020\n\010priority\030\002 \001(\r\"0\n\026" +
-      "Constraint_SLA_Latency\022\026\n\016e2e_latency_ms" +
-      "\030\001 \001(\002\"0\n\027Constraint_SLA_Capacity\022\025\n\rcap" +
-      "acity_gbps\030\001 \001(\002\"M\n\033Constraint_SLA_Avail" +
-      "ability\022\032\n\022num_disjoint_paths\030\001 \001(\r\022\022\n\na" +
-      "ll_active\030\002 \001(\010\"V\n\036Constraint_SLA_Isolat" +
-      "ion_level\0224\n\017isolation_level\030\001 \003(\0162\033.con" +
-      "text.IsolationLevelEnum\"\366\003\n\nConstraint\022," +
-      "\n\006custom\030\001 \001(\0132\032.context.Constraint_Cust" +
-      "omH\000\0220\n\010schedule\030\002 \001(\0132\034.context.Constra" +
-      "int_ScheduleH\000\022A\n\021endpoint_location\030\003 \001(" +
-      "\0132$.context.Constraint_EndPointLocationH" +
-      "\000\022A\n\021endpoint_priority\030\004 \001(\0132$.context.C" +
-      "onstraint_EndPointPriorityH\000\0228\n\014sla_capa" +
-      "city\030\005 \001(\0132 .context.Constraint_SLA_Capa" +
-      "cityH\000\0226\n\013sla_latency\030\006 \001(\0132\037.context.Co" +
-      "nstraint_SLA_LatencyH\000\022@\n\020sla_availabili" +
-      "ty\030\007 \001(\0132$.context.Constraint_SLA_Availa" +
-      "bilityH\000\022@\n\rsla_isolation\030\010 \001(\0132\'.contex" +
-      "t.Constraint_SLA_Isolation_levelH\000B\014\n\nco" +
-      "nstraint\"^\n\022TeraFlowController\022&\n\ncontex" +
-      "t_id\030\001 \001(\0132\022.context.ContextId\022\022\n\nip_add" +
-      "ress\030\002 \001(\t\022\014\n\004port\030\003 \001(\r\"U\n\024Authenticati" +
-      "onResult\022&\n\ncontext_id\030\001 \001(\0132\022.context.C" +
-      "ontextId\022\025\n\rauthenticated\030\002 \001(\010*j\n\rEvent" +
-      "TypeEnum\022\027\n\023EVENTTYPE_UNDEFINED\020\000\022\024\n\020EVE" +
-      "NTTYPE_CREATE\020\001\022\024\n\020EVENTTYPE_UPDATE\020\002\022\024\n" +
-      "\020EVENTTYPE_REMOVE\020\003*\305\001\n\020DeviceDriverEnum" +
-      "\022\032\n\026DEVICEDRIVER_UNDEFINED\020\000\022\033\n\027DEVICEDR" +
-      "IVER_OPENCONFIG\020\001\022\036\n\032DEVICEDRIVER_TRANSP" +
-      "ORT_API\020\002\022\023\n\017DEVICEDRIVER_P4\020\003\022&\n\"DEVICE" +
-      "DRIVER_IETF_NETWORK_TOPOLOGY\020\004\022\033\n\027DEVICE" +
-      "DRIVER_ONF_TR_352\020\005*\217\001\n\033DeviceOperationa" +
-      "lStatusEnum\022%\n!DEVICEOPERATIONALSTATUS_U" +
-      "NDEFINED\020\000\022$\n DEVICEOPERATIONALSTATUS_DI" +
-      "SABLED\020\001\022#\n\037DEVICEOPERATIONALSTATUS_ENAB" +
-      "LED\020\002*\201\001\n\017ServiceTypeEnum\022\027\n\023SERVICETYPE" +
-      "_UNKNOWN\020\000\022\024\n\020SERVICETYPE_L3NM\020\001\022\024\n\020SERV" +
-      "ICETYPE_L2NM\020\002\022)\n%SERVICETYPE_TAPI_CONNE" +
-      "CTIVITY_SERVICE\020\003*\250\001\n\021ServiceStatusEnum\022" +
-      "\033\n\027SERVICESTATUS_UNDEFINED\020\000\022\031\n\025SERVICES" +
-      "TATUS_PLANNED\020\001\022\030\n\024SERVICESTATUS_ACTIVE\020" +
-      "\002\022!\n\035SERVICESTATUS_PENDING_REMOVAL\020\003\022\036\n\032" +
-      "SERVICESTATUS_SLA_VIOLATED\020\004*\251\001\n\017SliceSt" +
-      "atusEnum\022\031\n\025SLICESTATUS_UNDEFINED\020\000\022\027\n\023S" +
-      "LICESTATUS_PLANNED\020\001\022\024\n\020SLICESTATUS_INIT" +
-      "\020\002\022\026\n\022SLICESTATUS_ACTIVE\020\003\022\026\n\022SLICESTATU" +
-      "S_DEINIT\020\004\022\034\n\030SLICESTATUS_SLA_VIOLATED\020\005" +
-      "*]\n\020ConfigActionEnum\022\032\n\026CONFIGACTION_UND" +
-      "EFINED\020\000\022\024\n\020CONFIGACTION_SET\020\001\022\027\n\023CONFIG" +
-      "ACTION_DELETE\020\002*\203\002\n\022IsolationLevelEnum\022\020" +
-      "\n\014NO_ISOLATION\020\000\022\026\n\022PHYSICAL_ISOLATION\020\001" +
-      "\022\025\n\021LOGICAL_ISOLATION\020\002\022\025\n\021PROCESS_ISOLA" +
-      "TION\020\003\022\035\n\031PHYSICAL_MEMORY_ISOLATION\020\004\022\036\n" +
-      "\032PHYSICAL_NETWORK_ISOLATION\020\005\022\036\n\032VIRTUAL" +
-      "_RESOURCE_ISOLATION\020\006\022\037\n\033NETWORK_FUNCTIO" +
-      "NS_ISOLATION\020\007\022\025\n\021SERVICE_ISOLATION\020\0102\357\022" +
-      "\n\016ContextService\022:\n\016ListContextIds\022\016.con" +
-      "text.Empty\032\026.context.ContextIdList\"\000\0226\n\014" +
-      "ListContexts\022\016.context.Empty\032\024.context.C" +
-      "ontextList\"\000\0224\n\nGetContext\022\022.context.Con" +
-      "textId\032\020.context.Context\"\000\0224\n\nSetContext" +
-      "\022\020.context.Context\032\022.context.ContextId\"\000" +
-      "\0225\n\rRemoveContext\022\022.context.ContextId\032\016." +
-      "context.Empty\"\000\022=\n\020GetContextEvents\022\016.co" +
-      "ntext.Empty\032\025.context.ContextEvent\"\0000\001\022@" +
-      "\n\017ListTopologyIds\022\022.context.ContextId\032\027." +
-      "context.TopologyIdList\"\000\022=\n\016ListTopologi" +
-      "es\022\022.context.ContextId\032\025.context.Topolog" +
-      "yList\"\000\0227\n\013GetTopology\022\023.context.Topolog" +
-      "yId\032\021.context.Topology\"\000\0227\n\013SetTopology\022" +
-      "\021.context.Topology\032\023.context.TopologyId\"" +
-      "\000\0227\n\016RemoveTopology\022\023.context.TopologyId" +
-      "\032\016.context.Empty\"\000\022?\n\021GetTopologyEvents\022" +
-      "\016.context.Empty\032\026.context.TopologyEvent\"" +
-      "\0000\001\0228\n\rListDeviceIds\022\016.context.Empty\032\025.c" +
-      "ontext.DeviceIdList\"\000\0224\n\013ListDevices\022\016.c" +
-      "ontext.Empty\032\023.context.DeviceList\"\000\0221\n\tG" +
-      "etDevice\022\021.context.DeviceId\032\017.context.De" +
-      "vice\"\000\0221\n\tSetDevice\022\017.context.Device\032\021.c" +
-      "ontext.DeviceId\"\000\0223\n\014RemoveDevice\022\021.cont" +
-      "ext.DeviceId\032\016.context.Empty\"\000\022;\n\017GetDev" +
-      "iceEvents\022\016.context.Empty\032\024.context.Devi" +
-      "ceEvent\"\0000\001\0224\n\013ListLinkIds\022\016.context.Emp" +
-      "ty\032\023.context.LinkIdList\"\000\0220\n\tListLinks\022\016" +
-      ".context.Empty\032\021.context.LinkList\"\000\022+\n\007G" +
-      "etLink\022\017.context.LinkId\032\r.context.Link\"\000" +
-      "\022+\n\007SetLink\022\r.context.Link\032\017.context.Lin" +
-      "kId\"\000\022/\n\nRemoveLink\022\017.context.LinkId\032\016.c" +
-      "ontext.Empty\"\000\0227\n\rGetLinkEvents\022\016.contex" +
-      "t.Empty\032\022.context.LinkEvent\"\0000\001\022>\n\016ListS" +
-      "erviceIds\022\022.context.ContextId\032\026.context." +
-      "ServiceIdList\"\000\022:\n\014ListServices\022\022.contex" +
-      "t.ContextId\032\024.context.ServiceList\"\000\0224\n\nG" +
-      "etService\022\022.context.ServiceId\032\020.context." +
-      "Service\"\000\0224\n\nSetService\022\020.context.Servic" +
-      "e\032\022.context.ServiceId\"\000\0225\n\rRemoveService" +
-      "\022\022.context.ServiceId\032\016.context.Empty\"\000\022=" +
-      "\n\020GetServiceEvents\022\016.context.Empty\032\025.con" +
-      "text.ServiceEvent\"\0000\001\022:\n\014ListSliceIds\022\022." +
-      "context.ContextId\032\024.context.SliceIdList\"" +
-      "\000\0226\n\nListSlices\022\022.context.ContextId\032\022.co" +
-      "ntext.SliceList\"\000\022.\n\010GetSlice\022\020.context." +
-      "SliceId\032\016.context.Slice\"\000\022.\n\010SetSlice\022\016." +
-      "context.Slice\032\020.context.SliceId\"\000\0221\n\013Rem" +
-      "oveSlice\022\020.context.SliceId\032\016.context.Emp" +
-      "ty\"\000\0229\n\016GetSliceEvents\022\016.context.Empty\032\023" +
-      ".context.SliceEvent\"\0000\001\022D\n\021ListConnectio" +
-      "nIds\022\022.context.ServiceId\032\031.context.Conne" +
-      "ctionIdList\"\000\022@\n\017ListConnections\022\022.conte" +
-      "xt.ServiceId\032\027.context.ConnectionList\"\000\022" +
-      "=\n\rGetConnection\022\025.context.ConnectionId\032" +
-      "\023.context.Connection\"\000\022=\n\rSetConnection\022" +
-      "\023.context.Connection\032\025.context.Connectio" +
-      "nId\"\000\022;\n\020RemoveConnection\022\025.context.Conn" +
-      "ectionId\032\016.context.Empty\"\000\022C\n\023GetConnect" +
-      "ionEvents\022\016.context.Empty\032\030.context.Conn" +
-      "ectionEvent\"\0000\001b\006proto3"
+      "Device\"\200\001\n\013DeviceEvent\022\035\n\005event\030\001 \001(\0132\016." +
+      "context.Event\022$\n\tdevice_id\030\002 \001(\0132\021.conte" +
+      "xt.DeviceId\022,\n\rdevice_config\030\003 \001(\0132\025.con" +
+      "text.DeviceConfig\"*\n\006LinkId\022 \n\tlink_uuid" +
+      "\030\001 \001(\0132\r.context.Uuid\"X\n\004Link\022 \n\007link_id" +
+      "\030\001 \001(\0132\017.context.LinkId\022.\n\021link_endpoint" +
+      "_ids\030\002 \003(\0132\023.context.EndPointId\"/\n\nLinkI" +
+      "dList\022!\n\010link_ids\030\001 \003(\0132\017.context.LinkId" +
+      "\"(\n\010LinkList\022\034\n\005links\030\001 \003(\0132\r.context.Li" +
+      "nk\"L\n\tLinkEvent\022\035\n\005event\030\001 \001(\0132\016.context" +
+      ".Event\022 \n\007link_id\030\002 \001(\0132\017.context.LinkId" +
+      "\"X\n\tServiceId\022&\n\ncontext_id\030\001 \001(\0132\022.cont" +
+      "ext.ContextId\022#\n\014service_uuid\030\002 \001(\0132\r.co" +
+      "ntext.Uuid\"\315\002\n\007Service\022&\n\nservice_id\030\001 \001" +
+      "(\0132\022.context.ServiceId\022.\n\014service_type\030\002" +
+      " \001(\0162\030.context.ServiceTypeEnum\0221\n\024servic" +
+      "e_endpoint_ids\030\003 \003(\0132\023.context.EndPointI" +
+      "d\0220\n\023service_constraints\030\004 \003(\0132\023.context" +
+      ".Constraint\022.\n\016service_status\030\005 \001(\0132\026.co" +
+      "ntext.ServiceStatus\022.\n\016service_config\030\006 " +
+      "\001(\0132\026.context.ServiceConfig\022%\n\ttimestamp" +
+      "\030\007 \001(\0132\022.context.Timestamp\"C\n\rServiceSta" +
+      "tus\0222\n\016service_status\030\001 \001(\0162\032.context.Se" +
+      "rviceStatusEnum\":\n\rServiceConfig\022)\n\014conf" +
+      "ig_rules\030\001 \003(\0132\023.context.ConfigRule\"8\n\rS" +
+      "erviceIdList\022\'\n\013service_ids\030\001 \003(\0132\022.cont" +
+      "ext.ServiceId\"1\n\013ServiceList\022\"\n\010services" +
+      "\030\001 \003(\0132\020.context.Service\"U\n\014ServiceEvent" +
+      "\022\035\n\005event\030\001 \001(\0132\016.context.Event\022&\n\nservi" +
+      "ce_id\030\002 \001(\0132\022.context.ServiceId\"T\n\007Slice" +
+      "Id\022&\n\ncontext_id\030\001 \001(\0132\022.context.Context" +
+      "Id\022!\n\nslice_uuid\030\002 \001(\0132\r.context.Uuid\"\222\003" +
+      "\n\005Slice\022\"\n\010slice_id\030\001 \001(\0132\020.context.Slic" +
+      "eId\022/\n\022slice_endpoint_ids\030\002 \003(\0132\023.contex" +
+      "t.EndPointId\022.\n\021slice_constraints\030\003 \003(\0132" +
+      "\023.context.Constraint\022-\n\021slice_service_id" +
+      "s\030\004 \003(\0132\022.context.ServiceId\022,\n\022slice_sub" +
+      "slice_ids\030\005 \003(\0132\020.context.SliceId\022*\n\014sli" +
+      "ce_status\030\006 \001(\0132\024.context.SliceStatus\022*\n" +
+      "\014slice_config\030\007 \001(\0132\024.context.SliceConfi" +
+      "g\022(\n\013slice_owner\030\010 \001(\0132\023.context.SliceOw" +
+      "ner\022%\n\ttimestamp\030\t \001(\0132\022.context.Timesta" +
+      "mp\"E\n\nSliceOwner\022!\n\nowner_uuid\030\001 \001(\0132\r.c" +
+      "ontext.Uuid\022\024\n\014owner_string\030\002 \001(\t\"=\n\013Sli" +
+      "ceStatus\022.\n\014slice_status\030\001 \001(\0162\030.context" +
+      ".SliceStatusEnum\"8\n\013SliceConfig\022)\n\014confi" +
+      "g_rules\030\001 \003(\0132\023.context.ConfigRule\"2\n\013Sl" +
+      "iceIdList\022#\n\tslice_ids\030\001 \003(\0132\020.context.S" +
+      "liceId\"+\n\tSliceList\022\036\n\006slices\030\001 \003(\0132\016.co" +
+      "ntext.Slice\"O\n\nSliceEvent\022\035\n\005event\030\001 \001(\013" +
+      "2\016.context.Event\022\"\n\010slice_id\030\002 \001(\0132\020.con" +
+      "text.SliceId\"6\n\014ConnectionId\022&\n\017connecti" +
+      "on_uuid\030\001 \001(\0132\r.context.Uuid\"2\n\025Connecti" +
+      "onSettings_L0\022\031\n\021lsp_symbolic_name\030\001 \001(\t" +
+      "\"\236\001\n\025ConnectionSettings_L2\022\027\n\017src_mac_ad" +
+      "dress\030\001 \001(\t\022\027\n\017dst_mac_address\030\002 \001(\t\022\022\n\n" +
+      "ether_type\030\003 \001(\r\022\017\n\007vlan_id\030\004 \001(\r\022\022\n\nmpl" +
+      "s_label\030\005 \001(\r\022\032\n\022mpls_traffic_class\030\006 \001(" +
+      "\r\"t\n\025ConnectionSettings_L3\022\026\n\016src_ip_add" +
+      "ress\030\001 \001(\t\022\026\n\016dst_ip_address\030\002 \001(\t\022\014\n\004ds" +
+      "cp\030\003 \001(\r\022\020\n\010protocol\030\004 \001(\r\022\013\n\003ttl\030\005 \001(\r\"" +
+      "[\n\025ConnectionSettings_L4\022\020\n\010src_port\030\001 \001" +
+      "(\r\022\020\n\010dst_port\030\002 \001(\r\022\021\n\ttcp_flags\030\003 \001(\r\022" +
+      "\013\n\003ttl\030\004 \001(\r\"\304\001\n\022ConnectionSettings\022*\n\002l" +
+      "0\030\001 \001(\0132\036.context.ConnectionSettings_L0\022" +
+      "*\n\002l2\030\002 \001(\0132\036.context.ConnectionSettings" +
+      "_L2\022*\n\002l3\030\003 \001(\0132\036.context.ConnectionSett" +
+      "ings_L3\022*\n\002l4\030\004 \001(\0132\036.context.Connection" +
+      "Settings_L4\"\363\001\n\nConnection\022,\n\rconnection" +
+      "_id\030\001 \001(\0132\025.context.ConnectionId\022&\n\nserv" +
+      "ice_id\030\002 \001(\0132\022.context.ServiceId\0223\n\026path" +
+      "_hops_endpoint_ids\030\003 \003(\0132\023.context.EndPo" +
+      "intId\022+\n\017sub_service_ids\030\004 \003(\0132\022.context" +
+      ".ServiceId\022-\n\010settings\030\005 \001(\0132\033.context.C" +
+      "onnectionSettings\"A\n\020ConnectionIdList\022-\n" +
+      "\016connection_ids\030\001 \003(\0132\025.context.Connecti" +
+      "onId\":\n\016ConnectionList\022(\n\013connections\030\001 " +
+      "\003(\0132\023.context.Connection\"^\n\017ConnectionEv" +
+      "ent\022\035\n\005event\030\001 \001(\0132\016.context.Event\022,\n\rco" +
+      "nnection_id\030\002 \001(\0132\025.context.ConnectionId" +
+      "\"\202\001\n\nEndPointId\022(\n\013topology_id\030\001 \001(\0132\023.c" +
+      "ontext.TopologyId\022$\n\tdevice_id\030\002 \001(\0132\021.c" +
+      "ontext.DeviceId\022$\n\rendpoint_uuid\030\003 \001(\0132\r" +
+      ".context.Uuid\"\264\001\n\010EndPoint\022(\n\013endpoint_i" +
+      "d\030\001 \001(\0132\023.context.EndPointId\022\025\n\rendpoint" +
+      "_type\030\002 \001(\t\0229\n\020kpi_sample_types\030\003 \003(\0162\037." +
+      "kpi_sample_types.KpiSampleType\022,\n\021endpoi" +
+      "nt_location\030\004 \001(\0132\021.context.Location\"A\n\021" +
+      "ConfigRule_Custom\022\024\n\014resource_key\030\001 \001(\t\022" +
+      "\026\n\016resource_value\030\002 \001(\t\"]\n\016ConfigRule_AC" +
+      "L\022(\n\013endpoint_id\030\001 \001(\0132\023.context.EndPoin" +
+      "tId\022!\n\010rule_set\030\002 \001(\0132\017.acl.AclRuleSet\"\234" +
+      "\001\n\nConfigRule\022)\n\006action\030\001 \001(\0162\031.context." +
+      "ConfigActionEnum\022,\n\006custom\030\002 \001(\0132\032.conte" +
+      "xt.ConfigRule_CustomH\000\022&\n\003acl\030\003 \001(\0132\027.co" +
+      "ntext.ConfigRule_ACLH\000B\r\n\013config_rule\"F\n" +
+      "\021Constraint_Custom\022\027\n\017constraint_type\030\001 " +
+      "\001(\t\022\030\n\020constraint_value\030\002 \001(\t\"E\n\023Constra" +
+      "int_Schedule\022\027\n\017start_timestamp\030\001 \001(\002\022\025\n" +
+      "\rduration_days\030\002 \001(\002\"3\n\014GPS_Position\022\020\n\010" +
+      "latitude\030\001 \001(\002\022\021\n\tlongitude\030\002 \001(\002\"W\n\010Loc" +
+      "ation\022\020\n\006region\030\001 \001(\tH\000\022-\n\014gps_position\030" +
+      "\002 \001(\0132\025.context.GPS_PositionH\000B\n\n\010locati" +
+      "on\"l\n\033Constraint_EndPointLocation\022(\n\013end" +
+      "point_id\030\001 \001(\0132\023.context.EndPointId\022#\n\010l" +
+      "ocation\030\002 \001(\0132\021.context.Location\"Y\n\033Cons" +
+      "traint_EndPointPriority\022(\n\013endpoint_id\030\001" +
+      " \001(\0132\023.context.EndPointId\022\020\n\010priority\030\002 " +
+      "\001(\r\"0\n\026Constraint_SLA_Latency\022\026\n\016e2e_lat" +
+      "ency_ms\030\001 \001(\002\"0\n\027Constraint_SLA_Capacity" +
+      "\022\025\n\rcapacity_gbps\030\001 \001(\002\"M\n\033Constraint_SL" +
+      "A_Availability\022\032\n\022num_disjoint_paths\030\001 \001" +
+      "(\r\022\022\n\nall_active\030\002 \001(\010\"V\n\036Constraint_SLA" +
+      "_Isolation_level\0224\n\017isolation_level\030\001 \003(" +
+      "\0162\033.context.IsolationLevelEnum\"\366\003\n\nConst" +
+      "raint\022,\n\006custom\030\001 \001(\0132\032.context.Constrai" +
+      "nt_CustomH\000\0220\n\010schedule\030\002 \001(\0132\034.context." +
+      "Constraint_ScheduleH\000\022A\n\021endpoint_locati" +
+      "on\030\003 \001(\0132$.context.Constraint_EndPointLo" +
+      "cationH\000\022A\n\021endpoint_priority\030\004 \001(\0132$.co" +
+      "ntext.Constraint_EndPointPriorityH\000\0228\n\014s" +
+      "la_capacity\030\005 \001(\0132 .context.Constraint_S" +
+      "LA_CapacityH\000\0226\n\013sla_latency\030\006 \001(\0132\037.con" +
+      "text.Constraint_SLA_LatencyH\000\022@\n\020sla_ava" +
+      "ilability\030\007 \001(\0132$.context.Constraint_SLA" +
+      "_AvailabilityH\000\022@\n\rsla_isolation\030\010 \001(\0132\'" +
+      ".context.Constraint_SLA_Isolation_levelH" +
+      "\000B\014\n\nconstraint\"^\n\022TeraFlowController\022&\n" +
+      "\ncontext_id\030\001 \001(\0132\022.context.ContextId\022\022\n" +
+      "\nip_address\030\002 \001(\t\022\014\n\004port\030\003 \001(\r\"U\n\024Authe" +
+      "nticationResult\022&\n\ncontext_id\030\001 \001(\0132\022.co" +
+      "ntext.ContextId\022\025\n\rauthenticated\030\002 \001(\010*j" +
+      "\n\rEventTypeEnum\022\027\n\023EVENTTYPE_UNDEFINED\020\000" +
+      "\022\024\n\020EVENTTYPE_CREATE\020\001\022\024\n\020EVENTTYPE_UPDA" +
+      "TE\020\002\022\024\n\020EVENTTYPE_REMOVE\020\003*\305\001\n\020DeviceDri" +
+      "verEnum\022\032\n\026DEVICEDRIVER_UNDEFINED\020\000\022\033\n\027D" +
+      "EVICEDRIVER_OPENCONFIG\020\001\022\036\n\032DEVICEDRIVER" +
+      "_TRANSPORT_API\020\002\022\023\n\017DEVICEDRIVER_P4\020\003\022&\n" +
+      "\"DEVICEDRIVER_IETF_NETWORK_TOPOLOGY\020\004\022\033\n" +
+      "\027DEVICEDRIVER_ONF_TR_352\020\005*\217\001\n\033DeviceOpe" +
+      "rationalStatusEnum\022%\n!DEVICEOPERATIONALS" +
+      "TATUS_UNDEFINED\020\000\022$\n DEVICEOPERATIONALST" +
+      "ATUS_DISABLED\020\001\022#\n\037DEVICEOPERATIONALSTAT" +
+      "US_ENABLED\020\002*\201\001\n\017ServiceTypeEnum\022\027\n\023SERV" +
+      "ICETYPE_UNKNOWN\020\000\022\024\n\020SERVICETYPE_L3NM\020\001\022" +
+      "\024\n\020SERVICETYPE_L2NM\020\002\022)\n%SERVICETYPE_TAP" +
+      "I_CONNECTIVITY_SERVICE\020\003*\250\001\n\021ServiceStat" +
+      "usEnum\022\033\n\027SERVICESTATUS_UNDEFINED\020\000\022\031\n\025S" +
+      "ERVICESTATUS_PLANNED\020\001\022\030\n\024SERVICESTATUS_" +
+      "ACTIVE\020\002\022!\n\035SERVICESTATUS_PENDING_REMOVA" +
+      "L\020\003\022\036\n\032SERVICESTATUS_SLA_VIOLATED\020\004*\251\001\n\017" +
+      "SliceStatusEnum\022\031\n\025SLICESTATUS_UNDEFINED" +
+      "\020\000\022\027\n\023SLICESTATUS_PLANNED\020\001\022\024\n\020SLICESTAT" +
+      "US_INIT\020\002\022\026\n\022SLICESTATUS_ACTIVE\020\003\022\026\n\022SLI" +
+      "CESTATUS_DEINIT\020\004\022\034\n\030SLICESTATUS_SLA_VIO" +
+      "LATED\020\005*]\n\020ConfigActionEnum\022\032\n\026CONFIGACT" +
+      "ION_UNDEFINED\020\000\022\024\n\020CONFIGACTION_SET\020\001\022\027\n" +
+      "\023CONFIGACTION_DELETE\020\002*\203\002\n\022IsolationLeve" +
+      "lEnum\022\020\n\014NO_ISOLATION\020\000\022\026\n\022PHYSICAL_ISOL" +
+      "ATION\020\001\022\025\n\021LOGICAL_ISOLATION\020\002\022\025\n\021PROCES" +
+      "S_ISOLATION\020\003\022\035\n\031PHYSICAL_MEMORY_ISOLATI" +
+      "ON\020\004\022\036\n\032PHYSICAL_NETWORK_ISOLATION\020\005\022\036\n\032" +
+      "VIRTUAL_RESOURCE_ISOLATION\020\006\022\037\n\033NETWORK_" +
+      "FUNCTIONS_ISOLATION\020\007\022\025\n\021SERVICE_ISOLATI" +
+      "ON\020\0102\331\023\n\016ContextService\022:\n\016ListContextId" +
+      "s\022\016.context.Empty\032\026.context.ContextIdLis" +
+      "t\"\000\0226\n\014ListContexts\022\016.context.Empty\032\024.co" +
+      "ntext.ContextList\"\000\0224\n\nGetContext\022\022.cont" +
+      "ext.ContextId\032\020.context.Context\"\000\0224\n\nSet" +
+      "Context\022\020.context.Context\032\022.context.Cont" +
+      "extId\"\000\0225\n\rRemoveContext\022\022.context.Conte" +
+      "xtId\032\016.context.Empty\"\000\022=\n\020GetContextEven" +
+      "ts\022\016.context.Empty\032\025.context.ContextEven" +
+      "t\"\0000\001\022@\n\017ListTopologyIds\022\022.context.Conte" +
+      "xtId\032\027.context.TopologyIdList\"\000\022=\n\016ListT" +
+      "opologies\022\022.context.ContextId\032\025.context." +
+      "TopologyList\"\000\0227\n\013GetTopology\022\023.context." +
+      "TopologyId\032\021.context.Topology\"\000\0227\n\013SetTo" +
+      "pology\022\021.context.Topology\032\023.context.Topo" +
+      "logyId\"\000\0227\n\016RemoveTopology\022\023.context.Top" +
+      "ologyId\032\016.context.Empty\"\000\022?\n\021GetTopology" +
+      "Events\022\016.context.Empty\032\026.context.Topolog" +
+      "yEvent\"\0000\001\0228\n\rListDeviceIds\022\016.context.Em" +
+      "pty\032\025.context.DeviceIdList\"\000\0224\n\013ListDevi" +
+      "ces\022\016.context.Empty\032\023.context.DeviceList" +
+      "\"\000\0221\n\tGetDevice\022\021.context.DeviceId\032\017.con" +
+      "text.Device\"\000\0221\n\tSetDevice\022\017.context.Dev" +
+      "ice\032\021.context.DeviceId\"\000\0223\n\014RemoveDevice" +
+      "\022\021.context.DeviceId\032\016.context.Empty\"\000\022;\n" +
+      "\017GetDeviceEvents\022\016.context.Empty\032\024.conte" +
+      "xt.DeviceEvent\"\0000\001\0224\n\013ListLinkIds\022\016.cont" +
+      "ext.Empty\032\023.context.LinkIdList\"\000\0220\n\tList" +
+      "Links\022\016.context.Empty\032\021.context.LinkList" +
+      "\"\000\022+\n\007GetLink\022\017.context.LinkId\032\r.context" +
+      ".Link\"\000\022+\n\007SetLink\022\r.context.Link\032\017.cont" +
+      "ext.LinkId\"\000\022/\n\nRemoveLink\022\017.context.Lin" +
+      "kId\032\016.context.Empty\"\000\0227\n\rGetLinkEvents\022\016" +
+      ".context.Empty\032\022.context.LinkEvent\"\0000\001\022>" +
+      "\n\016ListServiceIds\022\022.context.ContextId\032\026.c" +
+      "ontext.ServiceIdList\"\000\022:\n\014ListServices\022\022" +
+      ".context.ContextId\032\024.context.ServiceList" +
+      "\"\000\0224\n\nGetService\022\022.context.ServiceId\032\020.c" +
+      "ontext.Service\"\000\0224\n\nSetService\022\020.context" +
+      ".Service\032\022.context.ServiceId\"\000\0226\n\014UnsetS" +
+      "ervice\022\020.context.Service\032\022.context.Servi" +
+      "ceId\"\000\0225\n\rRemoveService\022\022.context.Servic" +
+      "eId\032\016.context.Empty\"\000\022=\n\020GetServiceEvent" +
+      "s\022\016.context.Empty\032\025.context.ServiceEvent" +
+      "\"\0000\001\022:\n\014ListSliceIds\022\022.context.ContextId" +
+      "\032\024.context.SliceIdList\"\000\0226\n\nListSlices\022\022" +
+      ".context.ContextId\032\022.context.SliceList\"\000" +
+      "\022.\n\010GetSlice\022\020.context.SliceId\032\016.context" +
+      ".Slice\"\000\022.\n\010SetSlice\022\016.context.Slice\032\020.c" +
+      "ontext.SliceId\"\000\0220\n\nUnsetSlice\022\016.context" +
+      ".Slice\032\020.context.SliceId\"\000\0221\n\013RemoveSlic" +
+      "e\022\020.context.SliceId\032\016.context.Empty\"\000\0229\n" +
+      "\016GetSliceEvents\022\016.context.Empty\032\023.contex" +
+      "t.SliceEvent\"\0000\001\022D\n\021ListConnectionIds\022\022." +
+      "context.ServiceId\032\031.context.ConnectionId" +
+      "List\"\000\022@\n\017ListConnections\022\022.context.Serv" +
+      "iceId\032\027.context.ConnectionList\"\000\022=\n\rGetC" +
+      "onnection\022\025.context.ConnectionId\032\023.conte" +
+      "xt.Connection\"\000\022=\n\rSetConnection\022\023.conte" +
+      "xt.Connection\032\025.context.ConnectionId\"\000\022;" +
+      "\n\020RemoveConnection\022\025.context.ConnectionI" +
+      "d\032\016.context.Empty\"\000\022C\n\023GetConnectionEven" +
+      "ts\022\016.context.Empty\032\030.context.ConnectionE" +
+      "vent\"\0000\001b\006proto3"
     };
     descriptor = com.google.protobuf.Descriptors.FileDescriptor
       .internalBuildGeneratedFileFrom(descriptorData,
@@ -62331,7 +62538,7 @@ public final class ContextOuterClass {
     internal_static_context_DeviceEvent_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_context_DeviceEvent_descriptor,
-        new java.lang.String[] { "Event", "DeviceId", });
+        new java.lang.String[] { "Event", "DeviceId", "DeviceConfig", });
     internal_static_context_LinkId_descriptor =
       getDescriptor().getMessageTypes().get(20);
     internal_static_context_LinkId_fieldAccessorTable = new
diff --git a/src/automation/target/generated-sources/grpc/context/ContextService.java b/src/automation/target/generated-sources/grpc/context/ContextService.java
index d54c56057ca53e40071490d3b9aa313a13a77665..814ea98b65370f8fd3ffd752c77bec04997a5dd6 100644
--- a/src/automation/target/generated-sources/grpc/context/ContextService.java
+++ b/src/automation/target/generated-sources/grpc/context/ContextService.java
@@ -56,6 +56,8 @@ public interface ContextService extends MutinyService {
     
     io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> setService(context.ContextOuterClass.Service request);
     
+    io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request);
+    
     io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request);
     
     io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceIdList> listSliceIds(context.ContextOuterClass.ContextId request);
@@ -66,6 +68,8 @@ public interface ContextService extends MutinyService {
     
     io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> setSlice(context.ContextOuterClass.Slice request);
     
+    io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request);
+    
     io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request);
     
     io.smallrye.mutiny.Uni<context.ContextOuterClass.ConnectionIdList> listConnectionIds(context.ContextOuterClass.ServiceId request);
diff --git a/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java b/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java
index f552294b8e6d645af41cc30632ae0432504bbc67..2b0099f106265e34d1f60bb3e0ecdc35f81895ee 100644
--- a/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java
+++ b/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java
@@ -208,6 +208,14 @@ public class ContextServiceBean extends MutinyContextServiceGrpc.ContextServiceI
        }
     }
     @Override
+    public io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request) {
+       try {
+         return delegate.unsetService(request);
+       } catch (UnsupportedOperationException e) {
+          throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED);
+       }
+    }
+    @Override
     public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request) {
        try {
          return delegate.removeService(request);
@@ -248,6 +256,14 @@ public class ContextServiceBean extends MutinyContextServiceGrpc.ContextServiceI
        }
     }
     @Override
+    public io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request) {
+       try {
+         return delegate.unsetSlice(request);
+       } catch (UnsupportedOperationException e) {
+          throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED);
+       }
+    }
+    @Override
     public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request) {
        try {
          return delegate.removeSlice(request);
diff --git a/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java b/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java
index c6493bd4d381967238e5eb87dd717f679d028526..c518a0b4622522728e0eb22fdbeb80442b10f7ef 100644
--- a/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java
+++ b/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java
@@ -117,6 +117,10 @@ public class ContextServiceClient implements ContextService, MutinyClient<Mutiny
        return stub.setService(request);
     }
     @Override
+    public io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request) {
+       return stub.unsetService(request);
+    }
+    @Override
     public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request) {
        return stub.removeService(request);
     }
@@ -137,6 +141,10 @@ public class ContextServiceClient implements ContextService, MutinyClient<Mutiny
        return stub.setSlice(request);
     }
     @Override
+    public io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request) {
+       return stub.unsetSlice(request);
+    }
+    @Override
     public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request) {
        return stub.removeSlice(request);
     }
diff --git a/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java b/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java
index be720c127439e50f68c2518332f85f750d6579ee..f59378086c84d0776cc25fb7aa9640403b072c0f 100644
--- a/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java
+++ b/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java
@@ -882,6 +882,37 @@ public final class ContextServiceGrpc {
     return getSetServiceMethod;
   }
 
+  private static volatile io.grpc.MethodDescriptor<context.ContextOuterClass.Service,
+      context.ContextOuterClass.ServiceId> getUnsetServiceMethod;
+
+  @io.grpc.stub.annotations.RpcMethod(
+      fullMethodName = SERVICE_NAME + '/' + "UnsetService",
+      requestType = context.ContextOuterClass.Service.class,
+      responseType = context.ContextOuterClass.ServiceId.class,
+      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
+  public static io.grpc.MethodDescriptor<context.ContextOuterClass.Service,
+      context.ContextOuterClass.ServiceId> getUnsetServiceMethod() {
+    io.grpc.MethodDescriptor<context.ContextOuterClass.Service, context.ContextOuterClass.ServiceId> getUnsetServiceMethod;
+    if ((getUnsetServiceMethod = ContextServiceGrpc.getUnsetServiceMethod) == null) {
+      synchronized (ContextServiceGrpc.class) {
+        if ((getUnsetServiceMethod = ContextServiceGrpc.getUnsetServiceMethod) == null) {
+          ContextServiceGrpc.getUnsetServiceMethod = getUnsetServiceMethod =
+              io.grpc.MethodDescriptor.<context.ContextOuterClass.Service, context.ContextOuterClass.ServiceId>newBuilder()
+              .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
+              .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UnsetService"))
+              .setSampledToLocalTracing(true)
+              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
+                  context.ContextOuterClass.Service.getDefaultInstance()))
+              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
+                  context.ContextOuterClass.ServiceId.getDefaultInstance()))
+              .setSchemaDescriptor(new ContextServiceMethodDescriptorSupplier("UnsetService"))
+              .build();
+        }
+      }
+    }
+    return getUnsetServiceMethod;
+  }
+
   private static volatile io.grpc.MethodDescriptor<context.ContextOuterClass.ServiceId,
       context.ContextOuterClass.Empty> getRemoveServiceMethod;
 
@@ -1068,6 +1099,37 @@ public final class ContextServiceGrpc {
     return getSetSliceMethod;
   }
 
+  private static volatile io.grpc.MethodDescriptor<context.ContextOuterClass.Slice,
+      context.ContextOuterClass.SliceId> getUnsetSliceMethod;
+
+  @io.grpc.stub.annotations.RpcMethod(
+      fullMethodName = SERVICE_NAME + '/' + "UnsetSlice",
+      requestType = context.ContextOuterClass.Slice.class,
+      responseType = context.ContextOuterClass.SliceId.class,
+      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
+  public static io.grpc.MethodDescriptor<context.ContextOuterClass.Slice,
+      context.ContextOuterClass.SliceId> getUnsetSliceMethod() {
+    io.grpc.MethodDescriptor<context.ContextOuterClass.Slice, context.ContextOuterClass.SliceId> getUnsetSliceMethod;
+    if ((getUnsetSliceMethod = ContextServiceGrpc.getUnsetSliceMethod) == null) {
+      synchronized (ContextServiceGrpc.class) {
+        if ((getUnsetSliceMethod = ContextServiceGrpc.getUnsetSliceMethod) == null) {
+          ContextServiceGrpc.getUnsetSliceMethod = getUnsetSliceMethod =
+              io.grpc.MethodDescriptor.<context.ContextOuterClass.Slice, context.ContextOuterClass.SliceId>newBuilder()
+              .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
+              .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UnsetSlice"))
+              .setSampledToLocalTracing(true)
+              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
+                  context.ContextOuterClass.Slice.getDefaultInstance()))
+              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
+                  context.ContextOuterClass.SliceId.getDefaultInstance()))
+              .setSchemaDescriptor(new ContextServiceMethodDescriptorSupplier("UnsetSlice"))
+              .build();
+        }
+      }
+    }
+    return getUnsetSliceMethod;
+  }
+
   private static volatile io.grpc.MethodDescriptor<context.ContextOuterClass.SliceId,
       context.ContextOuterClass.Empty> getRemoveSliceMethod;
 
@@ -1560,6 +1622,13 @@ public final class ContextServiceGrpc {
       io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getSetServiceMethod(), responseObserver);
     }
 
+    /**
+     */
+    public void unsetService(context.ContextOuterClass.Service request,
+        io.grpc.stub.StreamObserver<context.ContextOuterClass.ServiceId> responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getUnsetServiceMethod(), responseObserver);
+    }
+
     /**
      */
     public void removeService(context.ContextOuterClass.ServiceId request,
@@ -1602,6 +1671,13 @@ public final class ContextServiceGrpc {
       io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getSetSliceMethod(), responseObserver);
     }
 
+    /**
+     */
+    public void unsetSlice(context.ContextOuterClass.Slice request,
+        io.grpc.stub.StreamObserver<context.ContextOuterClass.SliceId> responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getUnsetSliceMethod(), responseObserver);
+    }
+
     /**
      */
     public void removeSlice(context.ContextOuterClass.SliceId request,
@@ -1856,6 +1932,13 @@ public final class ContextServiceGrpc {
                 context.ContextOuterClass.Service,
                 context.ContextOuterClass.ServiceId>(
                   this, METHODID_SET_SERVICE)))
+          .addMethod(
+            getUnsetServiceMethod(),
+            io.grpc.stub.ServerCalls.asyncUnaryCall(
+              new MethodHandlers<
+                context.ContextOuterClass.Service,
+                context.ContextOuterClass.ServiceId>(
+                  this, METHODID_UNSET_SERVICE)))
           .addMethod(
             getRemoveServiceMethod(),
             io.grpc.stub.ServerCalls.asyncUnaryCall(
@@ -1898,6 +1981,13 @@ public final class ContextServiceGrpc {
                 context.ContextOuterClass.Slice,
                 context.ContextOuterClass.SliceId>(
                   this, METHODID_SET_SLICE)))
+          .addMethod(
+            getUnsetSliceMethod(),
+            io.grpc.stub.ServerCalls.asyncUnaryCall(
+              new MethodHandlers<
+                context.ContextOuterClass.Slice,
+                context.ContextOuterClass.SliceId>(
+                  this, METHODID_UNSET_SLICE)))
           .addMethod(
             getRemoveSliceMethod(),
             io.grpc.stub.ServerCalls.asyncUnaryCall(
@@ -2196,6 +2286,14 @@ public final class ContextServiceGrpc {
           getChannel().newCall(getSetServiceMethod(), getCallOptions()), request, responseObserver);
     }
 
+    /**
+     */
+    public void unsetService(context.ContextOuterClass.Service request,
+        io.grpc.stub.StreamObserver<context.ContextOuterClass.ServiceId> responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getUnsetServiceMethod(), getCallOptions()), request, responseObserver);
+    }
+
     /**
      */
     public void removeService(context.ContextOuterClass.ServiceId request,
@@ -2244,6 +2342,14 @@ public final class ContextServiceGrpc {
           getChannel().newCall(getSetSliceMethod(), getCallOptions()), request, responseObserver);
     }
 
+    /**
+     */
+    public void unsetSlice(context.ContextOuterClass.Slice request,
+        io.grpc.stub.StreamObserver<context.ContextOuterClass.SliceId> responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getUnsetSliceMethod(), getCallOptions()), request, responseObserver);
+    }
+
     /**
      */
     public void removeSlice(context.ContextOuterClass.SliceId request,
@@ -2523,6 +2629,13 @@ public final class ContextServiceGrpc {
           getChannel(), getSetServiceMethod(), getCallOptions(), request);
     }
 
+    /**
+     */
+    public context.ContextOuterClass.ServiceId unsetService(context.ContextOuterClass.Service request) {
+      return io.grpc.stub.ClientCalls.blockingUnaryCall(
+          getChannel(), getUnsetServiceMethod(), getCallOptions(), request);
+    }
+
     /**
      */
     public context.ContextOuterClass.Empty removeService(context.ContextOuterClass.ServiceId request) {
@@ -2566,6 +2679,13 @@ public final class ContextServiceGrpc {
           getChannel(), getSetSliceMethod(), getCallOptions(), request);
     }
 
+    /**
+     */
+    public context.ContextOuterClass.SliceId unsetSlice(context.ContextOuterClass.Slice request) {
+      return io.grpc.stub.ClientCalls.blockingUnaryCall(
+          getChannel(), getUnsetSliceMethod(), getCallOptions(), request);
+    }
+
     /**
      */
     public context.ContextOuterClass.Empty removeSlice(context.ContextOuterClass.SliceId request) {
@@ -2831,6 +2951,14 @@ public final class ContextServiceGrpc {
           getChannel().newCall(getSetServiceMethod(), getCallOptions()), request);
     }
 
+    /**
+     */
+    public com.google.common.util.concurrent.ListenableFuture<context.ContextOuterClass.ServiceId> unsetService(
+        context.ContextOuterClass.Service request) {
+      return io.grpc.stub.ClientCalls.futureUnaryCall(
+          getChannel().newCall(getUnsetServiceMethod(), getCallOptions()), request);
+    }
+
     /**
      */
     public com.google.common.util.concurrent.ListenableFuture<context.ContextOuterClass.Empty> removeService(
@@ -2871,6 +2999,14 @@ public final class ContextServiceGrpc {
           getChannel().newCall(getSetSliceMethod(), getCallOptions()), request);
     }
 
+    /**
+     */
+    public com.google.common.util.concurrent.ListenableFuture<context.ContextOuterClass.SliceId> unsetSlice(
+        context.ContextOuterClass.Slice request) {
+      return io.grpc.stub.ClientCalls.futureUnaryCall(
+          getChannel().newCall(getUnsetSliceMethod(), getCallOptions()), request);
+    }
+
     /**
      */
     public com.google.common.util.concurrent.ListenableFuture<context.ContextOuterClass.Empty> removeSlice(
@@ -2948,20 +3084,22 @@ public final class ContextServiceGrpc {
   private static final int METHODID_LIST_SERVICES = 25;
   private static final int METHODID_GET_SERVICE = 26;
   private static final int METHODID_SET_SERVICE = 27;
-  private static final int METHODID_REMOVE_SERVICE = 28;
-  private static final int METHODID_GET_SERVICE_EVENTS = 29;
-  private static final int METHODID_LIST_SLICE_IDS = 30;
-  private static final int METHODID_LIST_SLICES = 31;
-  private static final int METHODID_GET_SLICE = 32;
-  private static final int METHODID_SET_SLICE = 33;
-  private static final int METHODID_REMOVE_SLICE = 34;
-  private static final int METHODID_GET_SLICE_EVENTS = 35;
-  private static final int METHODID_LIST_CONNECTION_IDS = 36;
-  private static final int METHODID_LIST_CONNECTIONS = 37;
-  private static final int METHODID_GET_CONNECTION = 38;
-  private static final int METHODID_SET_CONNECTION = 39;
-  private static final int METHODID_REMOVE_CONNECTION = 40;
-  private static final int METHODID_GET_CONNECTION_EVENTS = 41;
+  private static final int METHODID_UNSET_SERVICE = 28;
+  private static final int METHODID_REMOVE_SERVICE = 29;
+  private static final int METHODID_GET_SERVICE_EVENTS = 30;
+  private static final int METHODID_LIST_SLICE_IDS = 31;
+  private static final int METHODID_LIST_SLICES = 32;
+  private static final int METHODID_GET_SLICE = 33;
+  private static final int METHODID_SET_SLICE = 34;
+  private static final int METHODID_UNSET_SLICE = 35;
+  private static final int METHODID_REMOVE_SLICE = 36;
+  private static final int METHODID_GET_SLICE_EVENTS = 37;
+  private static final int METHODID_LIST_CONNECTION_IDS = 38;
+  private static final int METHODID_LIST_CONNECTIONS = 39;
+  private static final int METHODID_GET_CONNECTION = 40;
+  private static final int METHODID_SET_CONNECTION = 41;
+  private static final int METHODID_REMOVE_CONNECTION = 42;
+  private static final int METHODID_GET_CONNECTION_EVENTS = 43;
 
   private static final class MethodHandlers<Req, Resp> implements
       io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
@@ -3092,6 +3230,10 @@ public final class ContextServiceGrpc {
           serviceImpl.setService((context.ContextOuterClass.Service) request,
               (io.grpc.stub.StreamObserver<context.ContextOuterClass.ServiceId>) responseObserver);
           break;
+        case METHODID_UNSET_SERVICE:
+          serviceImpl.unsetService((context.ContextOuterClass.Service) request,
+              (io.grpc.stub.StreamObserver<context.ContextOuterClass.ServiceId>) responseObserver);
+          break;
         case METHODID_REMOVE_SERVICE:
           serviceImpl.removeService((context.ContextOuterClass.ServiceId) request,
               (io.grpc.stub.StreamObserver<context.ContextOuterClass.Empty>) responseObserver);
@@ -3116,6 +3258,10 @@ public final class ContextServiceGrpc {
           serviceImpl.setSlice((context.ContextOuterClass.Slice) request,
               (io.grpc.stub.StreamObserver<context.ContextOuterClass.SliceId>) responseObserver);
           break;
+        case METHODID_UNSET_SLICE:
+          serviceImpl.unsetSlice((context.ContextOuterClass.Slice) request,
+              (io.grpc.stub.StreamObserver<context.ContextOuterClass.SliceId>) responseObserver);
+          break;
         case METHODID_REMOVE_SLICE:
           serviceImpl.removeSlice((context.ContextOuterClass.SliceId) request,
               (io.grpc.stub.StreamObserver<context.ContextOuterClass.Empty>) responseObserver);
@@ -3237,12 +3383,14 @@ public final class ContextServiceGrpc {
               .addMethod(getListServicesMethod())
               .addMethod(getGetServiceMethod())
               .addMethod(getSetServiceMethod())
+              .addMethod(getUnsetServiceMethod())
               .addMethod(getRemoveServiceMethod())
               .addMethod(getGetServiceEventsMethod())
               .addMethod(getListSliceIdsMethod())
               .addMethod(getListSlicesMethod())
               .addMethod(getGetSliceMethod())
               .addMethod(getSetSliceMethod())
+              .addMethod(getUnsetSliceMethod())
               .addMethod(getRemoveSliceMethod())
               .addMethod(getGetSliceEventsMethod())
               .addMethod(getListConnectionIdsMethod())
diff --git a/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java b/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java
index 9f71b53786e40922546dc59cfd4328040a40bd7c..f7d2cb94e339366b54355c7e11b3ee72fa1e415c 100644
--- a/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java
+++ b/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java
@@ -156,6 +156,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M
         }
 
         
+        public io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request) {
+            return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::unsetService);
+        }
+
+        
         public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request) {
             return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::removeService);
         }
@@ -181,6 +186,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M
         }
 
         
+        public io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request) {
+            return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::unsetSlice);
+        }
+
+        
         public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request) {
             return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::removeSlice);
         }
@@ -383,6 +393,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M
         }
 
         
+        public io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request) {
+            throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED);
+        }
+
+        
         public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request) {
             throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED);
         }
@@ -408,6 +423,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M
         }
 
         
+        public io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request) {
+            throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED);
+        }
+
+        
         public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request) {
             throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED);
         }
@@ -670,6 +690,13 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M
                                             context.ContextOuterClass.Service,
                                             context.ContextOuterClass.ServiceId>(
                                             this, METHODID_SET_SERVICE, compression)))
+                    .addMethod(
+                            context.ContextServiceGrpc.getUnsetServiceMethod(),
+                            asyncUnaryCall(
+                                    new MethodHandlers<
+                                            context.ContextOuterClass.Service,
+                                            context.ContextOuterClass.ServiceId>(
+                                            this, METHODID_UNSET_SERVICE, compression)))
                     .addMethod(
                             context.ContextServiceGrpc.getRemoveServiceMethod(),
                             asyncUnaryCall(
@@ -712,6 +739,13 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M
                                             context.ContextOuterClass.Slice,
                                             context.ContextOuterClass.SliceId>(
                                             this, METHODID_SET_SLICE, compression)))
+                    .addMethod(
+                            context.ContextServiceGrpc.getUnsetSliceMethod(),
+                            asyncUnaryCall(
+                                    new MethodHandlers<
+                                            context.ContextOuterClass.Slice,
+                                            context.ContextOuterClass.SliceId>(
+                                            this, METHODID_UNSET_SLICE, compression)))
                     .addMethod(
                             context.ContextServiceGrpc.getRemoveSliceMethod(),
                             asyncUnaryCall(
@@ -800,20 +834,22 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M
     private static final int METHODID_LIST_SERVICES = 25;
     private static final int METHODID_GET_SERVICE = 26;
     private static final int METHODID_SET_SERVICE = 27;
-    private static final int METHODID_REMOVE_SERVICE = 28;
-    private static final int METHODID_GET_SERVICE_EVENTS = 29;
-    private static final int METHODID_LIST_SLICE_IDS = 30;
-    private static final int METHODID_LIST_SLICES = 31;
-    private static final int METHODID_GET_SLICE = 32;
-    private static final int METHODID_SET_SLICE = 33;
-    private static final int METHODID_REMOVE_SLICE = 34;
-    private static final int METHODID_GET_SLICE_EVENTS = 35;
-    private static final int METHODID_LIST_CONNECTION_IDS = 36;
-    private static final int METHODID_LIST_CONNECTIONS = 37;
-    private static final int METHODID_GET_CONNECTION = 38;
-    private static final int METHODID_SET_CONNECTION = 39;
-    private static final int METHODID_REMOVE_CONNECTION = 40;
-    private static final int METHODID_GET_CONNECTION_EVENTS = 41;
+    private static final int METHODID_UNSET_SERVICE = 28;
+    private static final int METHODID_REMOVE_SERVICE = 29;
+    private static final int METHODID_GET_SERVICE_EVENTS = 30;
+    private static final int METHODID_LIST_SLICE_IDS = 31;
+    private static final int METHODID_LIST_SLICES = 32;
+    private static final int METHODID_GET_SLICE = 33;
+    private static final int METHODID_SET_SLICE = 34;
+    private static final int METHODID_UNSET_SLICE = 35;
+    private static final int METHODID_REMOVE_SLICE = 36;
+    private static final int METHODID_GET_SLICE_EVENTS = 37;
+    private static final int METHODID_LIST_CONNECTION_IDS = 38;
+    private static final int METHODID_LIST_CONNECTIONS = 39;
+    private static final int METHODID_GET_CONNECTION = 40;
+    private static final int METHODID_SET_CONNECTION = 41;
+    private static final int METHODID_REMOVE_CONNECTION = 42;
+    private static final int METHODID_GET_CONNECTION_EVENTS = 43;
 
     private static final class MethodHandlers<Req, Resp> implements
             io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
@@ -1002,6 +1038,12 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M
                             compression,
                             serviceImpl::setService);
                     break;
+                case METHODID_UNSET_SERVICE:
+                    io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.Service) request,
+                            (io.grpc.stub.StreamObserver<context.ContextOuterClass.ServiceId>) responseObserver,
+                            compression,
+                            serviceImpl::unsetService);
+                    break;
                 case METHODID_REMOVE_SERVICE:
                     io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.ServiceId) request,
                             (io.grpc.stub.StreamObserver<context.ContextOuterClass.Empty>) responseObserver,
@@ -1038,6 +1080,12 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M
                             compression,
                             serviceImpl::setSlice);
                     break;
+                case METHODID_UNSET_SLICE:
+                    io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.Slice) request,
+                            (io.grpc.stub.StreamObserver<context.ContextOuterClass.SliceId>) responseObserver,
+                            compression,
+                            serviceImpl::unsetSlice);
+                    break;
                 case METHODID_REMOVE_SLICE:
                     io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.SliceId) request,
                             (io.grpc.stub.StreamObserver<context.ContextOuterClass.Empty>) responseObserver,
diff --git a/src/common/rpc_method_wrapper/ServiceExceptions.py b/src/common/rpc_method_wrapper/ServiceExceptions.py
index f4f0a64cad79c96dc069bd37e8d2c2be5f011c53..e8d5c79acca19117fca53ec216166c01d3f0781d 100644
--- a/src/common/rpc_method_wrapper/ServiceExceptions.py
+++ b/src/common/rpc_method_wrapper/ServiceExceptions.py
@@ -56,3 +56,11 @@ class OperationFailedException(ServiceException):
 
         details = 'Operation({:s}) failed'.format(str(operation))
         super().__init__(grpc.StatusCode.INTERNAL, details, extra_details=extra_details)
+
+class NotImplementedException(ServiceException):
+    def __init__(
+        self, operation : str, extra_details : Union[str, Iterable[str]] = None
+        ) -> None:
+
+        details = 'Operation({:s}) not implemented'.format(str(operation))
+        super().__init__(grpc.StatusCode.UNIMPLEMENTED, details, extra_details=extra_details)
diff --git a/src/common/tests/EventTools.py b/src/common/tests/EventTools.py
index ceff4d60e597690b29d5f1bcac894c081eb88a56..d0f82841395ea77a7c2483099458760769f8c535 100644
--- a/src/common/tests/EventTools.py
+++ b/src/common/tests/EventTools.py
@@ -15,7 +15,7 @@
 import json, logging
 from typing import Dict, List, Tuple
 from common.proto.context_pb2 import (
-    ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, ServiceEvent, TopologyEvent)
+    ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent)
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.EventsCollector import EventsCollector
 
@@ -32,6 +32,7 @@ CLASSNAME_CONTEXT_EVENT    = class_to_classname(ContextEvent)
 CLASSNAME_TOPOLOGY_EVENT   = class_to_classname(TopologyEvent)
 CLASSNAME_DEVICE_EVENT     = class_to_classname(DeviceEvent)
 CLASSNAME_LINK_EVENT       = class_to_classname(LinkEvent)
+CLASSNAME_SLICE_EVENT      = class_to_classname(SliceEvent)
 CLASSNAME_SERVICE_EVENT    = class_to_classname(ServiceEvent)
 CLASSNAME_CONNECTION_EVENT = class_to_classname(ConnectionEvent)
 
@@ -40,6 +41,7 @@ EVENT_CLASS_NAME__TO__ENTITY_ID_SELECTOR = {
     CLASSNAME_TOPOLOGY_EVENT  : lambda event: event.topology_id,
     CLASSNAME_DEVICE_EVENT    : lambda event: event.device_id,
     CLASSNAME_LINK_EVENT      : lambda event: event.link_id,
+    CLASSNAME_SLICE_EVENT     : lambda event: event.slice_id,
     CLASSNAME_SERVICE_EVENT   : lambda event: event.service_id,
     CLASSNAME_CONNECTION_EVENT: lambda event: event.connection_id,
 }
diff --git a/src/common/tools/mutex_queues/MutexQueues.py b/src/common/tools/mutex_queues/MutexQueues.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3ab760f281c73ae2f308044d67b2d2b81aef142
--- /dev/null
+++ b/src/common/tools/mutex_queues/MutexQueues.py
@@ -0,0 +1,78 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MutexQueues:
+# ------------
+# This class enables to schedule and serialize operations concurrently issued
+# over a number of resources. For instance, when multiple components want to
+# configure devices through the Device component, configuration operations
+# have to be serialized to prevent data corruptions, and race conditions, etc.
+# Usage Example:
+#   class Servicer():
+#       def __init__(self):
+#           # init other stuff
+#           self.drivers = dict()
+#           self.mutex_queues = MutexQueues()
+#       
+#       def configure_device(self, device_uuid, settings):
+#           self.mutex_queues.wait_my_turn(device_uuid)
+#           driver = self.drivers.get(device_uuid)
+#           if driver is None:
+#               driver = Driver(device_uuid)
+#               self.drivers[device_uuid] = driver
+#           driver.configure(settings)
+#           self.mutex_queues.signal_done(device_uuid)
+
+import threading
+from queue import Queue
+from typing import Dict
+
+class MutexQueues:
+    def __init__(self) -> None:
+        # lock to protect dictionary updates
+        self.lock = threading.Lock()
+
+        # dictionaty of queues of mutexes: queue_name => queue[mutex]
+        # first mutex is the running one
+        self.mutex_queues : Dict[str, Queue[threading.Event]] = dict()
+    
+    def wait_my_turn(self, queue_name : str) -> None:
+        # create my mutex and enqueue it
+        mutex = threading.Event()
+        with self.lock:
+            queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())
+            first_in_queue = (queue.qsize() == 0)
+            queue.put_nowait(mutex)
+
+        # if I'm the first in the queue upon addition, means there are no running tasks
+        # directly return without waiting
+        if first_in_queue: return
+
+        # otherwise, wait for my turn in the queue
+        mutex.wait()
+
+    def signal_done(self, queue_name : str) -> None:
+        # I'm done with my work
+        with self.lock:
+            queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())
+            
+            # remove muself from the queue
+            queue.get_nowait()
+
+            # if there are no other tasks queued, return
+            if queue.qsize() == 0: return
+
+            # otherwise, signal the next task in the queue to start
+            next_mutex : threading.Event = queue.queue[0]
+            next_mutex.set()
diff --git a/src/common/tools/mutex_queues/__init__.py b/src/common/tools/mutex_queues/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/common/tools/mutex_queues/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py
index 51f75e6dbe5e430330e697da772d65703f7568c7..62f3dcbda148f1c624265ae7d76b0c17f5d36959 100644
--- a/src/common/tools/object_factory/Service.py
+++ b/src/common/tools/object_factory/Service.py
@@ -44,10 +44,20 @@ def json_service(
 
 def json_service_l3nm_planned(
         service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
-        config_rules : List[Dict] = []
+        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID
     ):
 
     return json_service(
-        service_uuid, ServiceTypeEnum.SERVICETYPE_L3NM, context_id=json_context_id(DEFAULT_CONTEXT_UUID),
+        service_uuid, ServiceTypeEnum.SERVICETYPE_L3NM, context_id=json_context_id(context_uuid),
+        status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints,
+        config_rules=config_rules)
+
+def json_service_tapi_planned(
+        service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
+        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID
+    ):
+
+    return json_service(
+        service_uuid, ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, context_id=json_context_id(context_uuid),
         status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints,
         config_rules=config_rules)
diff --git a/src/compute/service/__main__.py b/src/compute/service/__main__.py
index 345b2fdd6950ecda802e8bd1c86e1421b5c60d84..e80681e177f0f0def3dbe75d76e7e65ceaca1e87 100644
--- a/src/compute/service/__main__.py
+++ b/src/compute/service/__main__.py
@@ -39,6 +39,8 @@ def main():
     wait_for_environment_variables([
         get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
         get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
     signal.signal(signal.SIGINT,  signal_handler)
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
index c77d714a94fa8d2d4ee9cd2c3db06949665a489c..7e050289f19b93dc710185c2b29b326bbfd156d2 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py
@@ -16,12 +16,11 @@ import logging
 from flask import request
 from flask.json import jsonify
 from flask_restful import Resource
-from common.Constants import DEFAULT_CONTEXT_UUID
-from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, SliceStatusEnum
+from common.proto.context_pb2 import SliceStatusEnum
 from context.client.ContextClient import ContextClient
-from service.client.ServiceClient import ServiceClient
+from slice.client.SliceClient import SliceClient
 from .tools.Authentication import HTTP_AUTH
-from .tools.ContextMethods import get_service, get_slice
+from .tools.ContextMethods import get_slice
 from .tools.HttpStatusCodes import HTTP_GATEWAYTIMEOUT, HTTP_NOCONTENT, HTTP_OK, HTTP_SERVERERROR
 
 LOGGER = logging.getLogger(__name__)
@@ -32,31 +31,22 @@ class L2VPN_Service(Resource):
         LOGGER.debug('VPN_Id: {:s}'.format(str(vpn_id)))
         LOGGER.debug('Request: {:s}'.format(str(request)))
 
-        response = jsonify({})
         try:
             context_client = ContextClient()
 
-            target = get_service(context_client, vpn_id)
-            if target is not None:
-                if target.service_id.service_uuid.uuid != vpn_id: # pylint: disable=no-member
-                    raise Exception('Service retrieval failed. Wrong Service Id was returned')
-                service_ready_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE
-                service_status = target.service_status.service_status # pylint: disable=no-member
-                response.status_code = HTTP_OK if service_status == service_ready_status else HTTP_GATEWAYTIMEOUT
-                return response
-
             target = get_slice(context_client, vpn_id)
-            if target is not None:
-                if target.slice_id.slice_uuid.uuid != vpn_id: # pylint: disable=no-member
-                    raise Exception('Slice retrieval failed. Wrong Slice Id was returned')
-                slice_ready_status = SliceStatusEnum.SLICESTATUS_ACTIVE
-                slice_status = target.slice_status.slice_status # pylint: disable=no-member
-                response.status_code = HTTP_OK if slice_status == slice_ready_status else HTTP_GATEWAYTIMEOUT
-                return response
+            if target is None:
+                raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
 
-            raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
+            if target.slice_id.slice_uuid.uuid != vpn_id: # pylint: disable=no-member
+                raise Exception('Slice retrieval failed. Wrong Slice Id was returned')
+
+            slice_ready_status = SliceStatusEnum.SLICESTATUS_ACTIVE
+            slice_status = target.slice_status.slice_status # pylint: disable=no-member
+            response = jsonify({})
+            response.status_code = HTTP_OK if slice_status == slice_ready_status else HTTP_GATEWAYTIMEOUT
         except Exception as e: # pylint: disable=broad-except
-            LOGGER.exception('Something went wrong Retrieving VPN({:s})'.format(str(request)))
+            LOGGER.exception('Something went wrong Retrieving VPN({:s})'.format(str(vpn_id)))
             response = jsonify({'error': str(e)})
             response.status_code = HTTP_SERVERERROR
         return response
@@ -66,18 +56,21 @@ class L2VPN_Service(Resource):
         LOGGER.debug('VPN_Id: {:s}'.format(str(vpn_id)))
         LOGGER.debug('Request: {:s}'.format(str(request)))
 
-        # pylint: disable=no-member
-        service_id_request = ServiceId()
-        service_id_request.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID
-        service_id_request.service_uuid.uuid = vpn_id
-
         try:
-            service_client = ServiceClient()
-            service_client.DeleteService(service_id_request)
+            context_client = ContextClient()
+
+            target = get_slice(context_client, vpn_id)
+            if target is None:
+                LOGGER.warning('VPN({:s}) not found in database. Nothing done.'.format(str(vpn_id)))
+            else:
+                if target.slice_id.slice_uuid.uuid != vpn_id: # pylint: disable=no-member
+                    raise Exception('Slice retrieval failed. Wrong Slice Id was returned')
+                slice_client = SliceClient()
+                slice_client.DeleteSlice(target.slice_id)
             response = jsonify({})
             response.status_code = HTTP_NOCONTENT
         except Exception as e: # pylint: disable=broad-except
-            LOGGER.exception('Something went wrong Deleting Service {:s}'.format(str(request)))
+            LOGGER.exception('Something went wrong Deleting VPN({:s})'.format(str(vpn_id)))
             response = jsonify({'error': str(e)})
             response.status_code = HTTP_SERVERERROR
         return response
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
index 7b959b2895d0f0acd27058fcb5e9a571cf6553d2..f27d852f017a08cb8b854cc19568280b9de14470 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
@@ -19,8 +19,7 @@ from flask.json import jsonify
 from flask_restful import Resource
 from werkzeug.exceptions import UnsupportedMediaType
 from common.Constants import DEFAULT_CONTEXT_UUID
-from common.proto.context_pb2 import Service, ServiceStatusEnum, ServiceTypeEnum, SliceStatusEnum, Slice
-from service.client.ServiceClient import ServiceClient
+from common.proto.context_pb2 import SliceStatusEnum, Slice
 from slice.client.SliceClient import SliceClient
 from .schemas.vpn_service import SCHEMA_VPN_SERVICE
 from .tools.Authentication import HTTP_AUTH
@@ -44,30 +43,16 @@ class L2VPN_Services(Resource):
         vpn_services : List[Dict] = request_data['ietf-l2vpn-svc:vpn-service']
         for vpn_service in vpn_services:
             try:
-                vpn_service_type = vpn_service['vpn-svc-type']
-                if vpn_service_type == 'vpws':
-                    # pylint: disable=no-member
-                    service_request = Service()
-                    service_request.service_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID
-                    service_request.service_id.service_uuid.uuid = vpn_service['vpn-id']
-                    service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM
-                    service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
-
-                    service_client = ServiceClient()
-                    service_reply = service_client.CreateService(service_request)
-                    if service_reply != service_request.service_id: # pylint: disable=no-member
-                        raise Exception('Service creation failed. Wrong Service Id was returned')
-                elif vpn_service_type == 'vpls':
-                    # pylint: disable=no-member
-                    slice_request = Slice()
-                    slice_request.slice_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID
-                    slice_request.slice_id.slice_uuid.uuid = vpn_service['vpn-id']
-                    slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED
-
-                    slice_client = SliceClient()
-                    slice_reply = slice_client.CreateSlice(slice_request)
-                    if slice_reply != slice_request.slice_id: # pylint: disable=no-member
-                        raise Exception('Slice creation failed. Wrong Slice Id was returned')
+                # pylint: disable=no-member
+                slice_request = Slice()
+                slice_request.slice_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID
+                slice_request.slice_id.slice_uuid.uuid = vpn_service['vpn-id']
+                slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED
+
+                slice_client = SliceClient()
+                slice_reply = slice_client.CreateSlice(slice_request)
+                if slice_reply != slice_request.slice_id: # pylint: disable=no-member
+                    raise Exception('Slice creation failed. Wrong Slice Id was returned')
 
                 response = jsonify({})
                 response.status_code = HTTP_CREATED
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index 8be63895b813d7411b76ddeb33902babbf4c9743..3cc823a2aa7a06de6cb591ef6d668ba7eeef5cbd 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -12,169 +12,113 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ctypes import Union
-import json, logging
-from typing import Dict
+import logging
+from typing import Dict, Optional
 from flask import request
 from flask.json import jsonify
 from flask.wrappers import Response
 from flask_restful import Resource
 from werkzeug.exceptions import UnsupportedMediaType
-from common.proto.context_pb2 import ConfigActionEnum, Service, Slice
+from common.proto.context_pb2 import Slice
+from common.tools.grpc.ConfigRules import update_config_rule_custom
+from common.tools.grpc.Constraints import (
+    update_constraint_custom, update_constraint_endpoint_location, update_constraint_endpoint_priority,
+    update_constraint_sla_availability)
+from common.tools.grpc.EndPointIds import update_endpoint_ids
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
-from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
 from .schemas.site_network_access import SCHEMA_SITE_NETWORK_ACCESS
 from .tools.Authentication import HTTP_AUTH
-from .tools.ContextMethods import get_service, get_slice
+from .tools.ContextMethods import get_slice
 from .tools.HttpStatusCodes import HTTP_NOCONTENT, HTTP_SERVERERROR
 from .tools.Validator import validate_message
-from .Constants import BEARER_MAPPINGS, DEFAULT_ADDRESS_FAMILIES, DEFAULT_BGP_AS, DEFAULT_BGP_ROUTE_TARGET, DEFAULT_MTU
+from .Constants import (
+    BEARER_MAPPINGS, DEFAULT_ADDRESS_FAMILIES, DEFAULT_BGP_AS, DEFAULT_BGP_ROUTE_TARGET, DEFAULT_MTU)
 
 LOGGER = logging.getLogger(__name__)
 
-def process_site_network_access(context_client : ContextClient, site_network_access : Dict) -> Service:
+def process_site_network_access(context_client : ContextClient, site_id : str, site_network_access : Dict) -> Slice:
     vpn_id = site_network_access['vpn-attachment']['vpn-id']
-    cvlan_id = site_network_access['connection']['tagged-interface']['dot1q-vlan-tagged']['cvlan-id']
+    encapsulation_type = site_network_access['connection']['encapsulation-type']
+    cvlan_id = site_network_access['connection']['tagged-interface'][encapsulation_type]['cvlan-id']
+
     bearer_reference = site_network_access['bearer']['bearer-reference']
 
+    access_priority : Optional[int] = site_network_access.get('availability', {}).get('access-priority')
+    single_active   : bool = len(site_network_access.get('availability', {}).get('single-active', [])) > 0
+    all_active      : bool = len(site_network_access.get('availability', {}).get('all-active', [])) > 0
+
+    diversity_constraints = site_network_access.get('access-diversity', {}).get('constraints', {}).get('constraint', [])
+    raise_if_differs = True
+    diversity_constraints = {
+        constraint['constraint-type']:([
+            target[0]
+            for target in constraint['target'].items()
+            if len(target[1]) == 1
+        ][0], raise_if_differs)
+        for constraint in diversity_constraints
+    }
+
     mapping = BEARER_MAPPINGS.get(bearer_reference)
     if mapping is None:
         msg = 'Specified Bearer({:s}) is not configured.'
         raise Exception(msg.format(str(bearer_reference)))
-    device_uuid,endpoint_uuid,router_id,route_distinguisher,sub_if_index,address_ip,address_prefix = mapping
+    (
+        device_uuid, endpoint_uuid, router_id, route_dist, sub_if_index,
+        address_ip, address_prefix, remote_router, circuit_id
+    ) = mapping
 
-    target : Union[Service, Slice, None] = None
-    if target is None: target = get_service(context_client, vpn_id)
-    if target is None: target = get_slice  (context_client, vpn_id)
+    target = get_slice(context_client, vpn_id)
     if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
 
-    # pylint: disable=no-member
-    endpoint_ids = target.service_endpoint_ids if isinstance(target, Service) else target.slice_endpoint_ids
-
-    for endpoint_id in endpoint_ids:
-        if endpoint_id.device_id.device_uuid.uuid != device_uuid: continue
-        if endpoint_id.endpoint_uuid.uuid != endpoint_uuid: continue
-        break   # found, do nothing
-    else:
-        # not found, add it
-        endpoint_id = endpoint_ids.add()
-        endpoint_id.device_id.device_uuid.uuid = device_uuid
-        endpoint_id.endpoint_uuid.uuid = endpoint_uuid
-
-    if isinstance(target, Slice): return target
-
-    for config_rule in target.service_config.config_rules:                  # pylint: disable=no-member
-        if config_rule.WhichOneof('config_rule') != 'custom': continue
-        if config_rule.custom.resource_key != '/settings': continue
-        json_settings = json.loads(config_rule.custom.resource_value)
-
-        if 'mtu' not in json_settings:                                      # missing, add it
-            json_settings['mtu'] = DEFAULT_MTU
-        elif json_settings['mtu'] != DEFAULT_MTU:                           # differs, raise exception
-            msg = 'Specified MTU({:s}) differs from Service MTU({:s})'
-            raise Exception(msg.format(str(json_settings['mtu']), str(DEFAULT_MTU)))
-
-        if 'address_families' not in json_settings:                         # missing, add it
-            json_settings['address_families'] = DEFAULT_ADDRESS_FAMILIES
-        elif json_settings['address_families'] != DEFAULT_ADDRESS_FAMILIES: # differs, raise exception
-            msg = 'Specified AddressFamilies({:s}) differs from Service AddressFamilies({:s})'
-            raise Exception(msg.format(str(json_settings['address_families']), str(DEFAULT_ADDRESS_FAMILIES)))
-
-        if 'bgp_as' not in json_settings:                                   # missing, add it
-            json_settings['bgp_as'] = DEFAULT_BGP_AS
-        elif json_settings['bgp_as'] != DEFAULT_BGP_AS:                     # differs, raise exception
-            msg = 'Specified BgpAs({:s}) differs from Service BgpAs({:s})'
-            raise Exception(msg.format(str(json_settings['bgp_as']), str(DEFAULT_BGP_AS)))
-
-        if 'bgp_route_target' not in json_settings:                         # missing, add it
-            json_settings['bgp_route_target'] = DEFAULT_BGP_ROUTE_TARGET
-        elif json_settings['bgp_route_target'] != DEFAULT_BGP_ROUTE_TARGET: # differs, raise exception
-            msg = 'Specified BgpRouteTarget({:s}) differs from Service BgpRouteTarget({:s})'
-            raise Exception(msg.format(str(json_settings['bgp_route_target']), str(DEFAULT_BGP_ROUTE_TARGET)))
-
-        config_rule.custom.resource_value = json.dumps(json_settings, sort_keys=True)
-        break
-    else:
-        # not found, add it
-        config_rule = target.service_config.config_rules.add()              # pylint: disable=no-member
-        config_rule.action = ConfigActionEnum.CONFIGACTION_SET
-        config_rule.custom.resource_key = '/settings'
-        config_rule.custom.resource_value = json.dumps({
-            'mtu'             : DEFAULT_MTU,
-            'address_families': DEFAULT_ADDRESS_FAMILIES,
-            'bgp_as'          : DEFAULT_BGP_AS,
-            'bgp_route_target': DEFAULT_BGP_ROUTE_TARGET,
-        }, sort_keys=True)
+    endpoint_ids = target.slice_endpoint_ids        # pylint: disable=no-member
+    config_rules = target.slice_config.config_rules # pylint: disable=no-member
+    constraints  = target.slice_constraints         # pylint: disable=no-member
+
+    endpoint_id = update_endpoint_ids(endpoint_ids, device_uuid, endpoint_uuid)
+
+    service_settings_key = '/settings'
+    update_config_rule_custom(config_rules, service_settings_key, {
+        'mtu'             : (DEFAULT_MTU,              True),
+        'address_families': (DEFAULT_ADDRESS_FAMILIES, True),
+        'bgp_as'          : (DEFAULT_BGP_AS,           True),
+        'bgp_route_target': (DEFAULT_BGP_ROUTE_TARGET, True),
+    })
 
     endpoint_settings_key = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid)
-    for config_rule in target.service_config.config_rules:                  # pylint: disable=no-member
-        if config_rule.WhichOneof('config_rule') != 'custom': continue
-        if config_rule.custom.resource_key != endpoint_settings_key: continue
-        json_settings = json.loads(config_rule.custom.resource_value)
-
-        if 'router_id' not in json_settings:                                # missing, add it
-            json_settings['router_id'] = router_id
-        elif json_settings['router_id'] != router_id:                       # differs, raise exception
-            msg = 'Specified RouterId({:s}) differs from Service RouterId({:s})'
-            raise Exception(msg.format(str(json_settings['router_id']), str(router_id)))
-
-        if 'route_distinguisher' not in json_settings:                      # missing, add it
-            json_settings['route_distinguisher'] = route_distinguisher
-        elif json_settings['route_distinguisher'] != route_distinguisher:   # differs, raise exception
-            msg = 'Specified RouteDistinguisher({:s}) differs from Service RouteDistinguisher({:s})'
-            raise Exception(msg.format(str(json_settings['route_distinguisher']), str(route_distinguisher)))
-
-        if 'sub_interface_index' not in json_settings:                      # missing, add it
-            json_settings['sub_interface_index'] = sub_if_index
-        elif json_settings['sub_interface_index'] != sub_if_index:   # differs, raise exception
-            msg = 'Specified SubInterfaceIndex({:s}) differs from Service SubInterfaceIndex({:s})'
-            raise Exception(msg.format(
-                str(json_settings['sub_interface_index']), str(sub_if_index)))
-
-        if 'vlan_id' not in json_settings:                                  # missing, add it
-            json_settings['vlan_id'] = cvlan_id
-        elif json_settings['vlan_id'] != cvlan_id:                          # differs, raise exception
-            msg = 'Specified VLANId({:s}) differs from Service VLANId({:s})'
-            raise Exception(msg.format(
-                str(json_settings['vlan_id']), str(cvlan_id)))
-
-        if 'address_ip' not in json_settings:                               # missing, add it
-            json_settings['address_ip'] = address_ip
-        elif json_settings['address_ip'] != address_ip:                     # differs, raise exception
-            msg = 'Specified AddressIP({:s}) differs from Service AddressIP({:s})'
-            raise Exception(msg.format(
-                str(json_settings['address_ip']), str(address_ip)))
-
-        if 'address_prefix' not in json_settings:                           # missing, add it
-            json_settings['address_prefix'] = address_prefix
-        elif json_settings['address_prefix'] != address_prefix:             # differs, raise exception
-            msg = 'Specified AddressPrefix({:s}) differs from Service AddressPrefix({:s})'
-            raise Exception(msg.format(
-                str(json_settings['address_prefix']), str(address_prefix)))
-
-        config_rule.custom.resource_value = json.dumps(json_settings, sort_keys=True)
-        break
-    else:
-        # not found, add it
-        config_rule = target.service_config.config_rules.add()              # pylint: disable=no-member
-        config_rule.action = ConfigActionEnum.CONFIGACTION_SET
-        config_rule.custom.resource_key = endpoint_settings_key
-        config_rule.custom.resource_value = json.dumps({
-            'router_id': router_id,
-            'route_distinguisher': route_distinguisher,
-            'sub_interface_index': sub_if_index,
-            'vlan_id': cvlan_id,
-            'address_ip': address_ip,
-            'address_prefix': address_prefix,
-        }, sort_keys=True)
+    field_updates = {}
+    if router_id      is not None: field_updates['router_id'          ] = (router_id,      True)
+    if route_dist     is not None: field_updates['route_distinguisher'] = (route_dist,     True)
+    if sub_if_index   is not None: field_updates['sub_interface_index'] = (sub_if_index,   True)
+    if cvlan_id       is not None: field_updates['vlan_id'            ] = (cvlan_id,       True)
+    if address_ip     is not None: field_updates['address_ip'         ] = (address_ip,     True)
+    if address_prefix is not None: field_updates['address_prefix'     ] = (address_prefix, True)
+    if remote_router  is not None: field_updates['remote_router'      ] = (remote_router,  True)
+    if circuit_id     is not None: field_updates['circuit_id'         ] = (circuit_id,     True)
+    update_config_rule_custom(config_rules, endpoint_settings_key, field_updates)
+
+    if len(diversity_constraints) > 0:
+        update_constraint_custom(constraints, 'diversity', diversity_constraints)
+
+    update_constraint_endpoint_location(constraints, endpoint_id, region=site_id)
+    if access_priority is not None: update_constraint_endpoint_priority(constraints, endpoint_id, access_priority)
+    if single_active or all_active:
+        # assume 1 disjoint path per endpoint/location included in service/slice
+        location_endpoints = {}
+        for constraint in constraints:
+            if constraint.WhichOneof('constraint') != 'endpoint_location': continue
+            str_endpoint_id = grpc_message_to_json_string(constraint.endpoint_location.endpoint_id)
+            str_location_id = grpc_message_to_json_string(constraint.endpoint_location.location)
+            location_endpoints.setdefault(str_location_id, set()).add(str_endpoint_id)
+        num_endpoints_per_location = {len(endpoints) for endpoints in location_endpoints.values()}
+        num_disjoint_paths = min(num_endpoints_per_location)
+        update_constraint_sla_availability(constraints, num_disjoint_paths, all_active)
 
     return target
 
 def process_list_site_network_access(
-        context_client : ContextClient, service_client : ServiceClient, slice_client : SliceClient,
-        request_data : Dict
+        context_client : ContextClient, slice_client : SliceClient, site_id : str, request_data : Dict
     ) -> Response:
 
     LOGGER.debug('Request: {:s}'.format(str(request_data)))
@@ -182,21 +126,14 @@ def process_list_site_network_access(
 
     errors = []
     for site_network_access in request_data['ietf-l2vpn-svc:site-network-access']:
-        sna_request = process_site_network_access(context_client, site_network_access)
+        sna_request = process_site_network_access(context_client, site_id, site_network_access)
         LOGGER.debug('sna_request = {:s}'.format(grpc_message_to_json_string(sna_request)))
         try:
-            if isinstance(sna_request, Service):
-                sna_reply = service_client.UpdateService(sna_request)
-                if sna_reply != sna_request.service_id: # pylint: disable=no-member
-                    raise Exception('Service update failed. Wrong Service Id was returned')
-            elif isinstance(sna_request, Slice):
-                sna_reply = slice_client.UpdateSlice(sna_request)
-                if sna_reply != sna_request.slice_id: # pylint: disable=no-member
-                    raise Exception('Slice update failed. Wrong Slice Id was returned')
-            else:
-                raise NotImplementedError('Support for Class({:s}) not implemented'.format(str(type(sna_request))))
+            sna_reply = slice_client.UpdateSlice(sna_request)
+            if sna_reply != sna_request.slice_id: # pylint: disable=no-member
+                raise Exception('Slice update failed. Wrong Slice Id was returned')
         except Exception as e: # pylint: disable=broad-except
-            msg = 'Something went wrong Updating Service {:s}'
+            msg = 'Something went wrong Updating VPN {:s}'
             LOGGER.exception(msg.format(grpc_message_to_json_string(sna_request)))
             errors.append({'error': str(e)})
 
@@ -210,15 +147,13 @@ class L2VPN_SiteNetworkAccesses(Resource):
         if not request.is_json: raise UnsupportedMediaType('JSON payload is required')
         LOGGER.debug('Site_Id: {:s}'.format(str(site_id)))
         context_client = ContextClient()
-        service_client = ServiceClient()
         slice_client = SliceClient()
-        return process_list_site_network_access(context_client, service_client, slice_client, request.json)
+        return process_list_site_network_access(context_client, slice_client, site_id, request.json)
 
     @HTTP_AUTH.login_required
     def put(self, site_id : str):
         if not request.is_json: raise UnsupportedMediaType('JSON payload is required')
         LOGGER.debug('Site_Id: {:s}'.format(str(site_id)))
         context_client = ContextClient()
-        service_client = ServiceClient()
         slice_client = SliceClient()
-        return process_list_site_network_access(context_client, service_client, slice_client, request.json)
+        return process_list_site_network_access(context_client, slice_client, site_id, request.json)
diff --git a/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py b/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py
index b9639e8046593c1dbf4017cff963ceb7c51d0532..e1273b4e483a06df23d94bdf107005ce7585fb5e 100644
--- a/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py
+++ b/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py
@@ -33,6 +33,7 @@ the Layer 2 service.
 import requests
 import uuid
 import logging
+import copy
 #from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
 from .sdnconn import SdnConnectorBase, SdnConnectorError
 
@@ -222,8 +223,29 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
                 http_code=response_service_creation.status_code,
             )
 
-        """Second step, create the connections and vpn attachments"""
+        self.logger.info('connection_points = {:s}'.format(str(connection_points)))
+
+        # Check if protected paths are requested
+        extended_connection_points = []
         for connection_point in connection_points:
+            extended_connection_points.append(connection_point)
+
+            connection_point_wan_info = self.search_mapp(connection_point)
+            service_mapping_info = connection_point_wan_info.get('service_mapping_info', {})
+            redundant_service_endpoint_ids = service_mapping_info.get('redundant')
+
+            if redundant_service_endpoint_ids is None: continue
+            if len(redundant_service_endpoint_ids) == 0: continue
+
+            for redundant_service_endpoint_id in redundant_service_endpoint_ids:
+                redundant_connection_point = copy.deepcopy(connection_point)
+                redundant_connection_point['service_endpoint_id'] = redundant_service_endpoint_id
+                extended_connection_points.append(redundant_connection_point)
+
+        self.logger.info('extended_connection_points = {:s}'.format(str(extended_connection_points)))
+
+        """Second step, create the connections and vpn attachments"""
+        for connection_point in extended_connection_points:
             connection_point_wan_info = self.search_mapp(connection_point)
             site_network_access = {}
             connection = {}
@@ -264,6 +286,23 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
             site_network_access["bearer"] = connection_point_wan_info[
                 "service_mapping_info"
             ]["bearer"]
+
+            access_priority = connection_point_wan_info["service_mapping_info"].get("priority")
+            if access_priority is not None:
+                availability = {}
+                availability["access-priority"] = access_priority
+                availability["single-active"] = [None]
+                site_network_access["availability"] = availability
+
+                constraint = {}
+                constraint['constraint-type'] = 'end-to-end-diverse'
+                constraint['target'] = {'all-other-accesses': [None]}
+
+                access_diversity = {}
+                access_diversity['constraints'] = {'constraint': []}
+                access_diversity['constraints']['constraint'].append(constraint)
+                site_network_access["access-diversity"] = access_diversity
+
             site_network_accesses = {}
             site_network_access_list = []
             site_network_access_list.append(site_network_access)
@@ -332,7 +371,7 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
                     self.delete_connectivity_service(vpn_service["vpn-id"])
 
                     raise SdnConnectorError(
-                        "Request no accepted",
+                        "Request not accepted",
                         http_code=response_endpoint_site_network_access_creation.status_code,
                     )
             except requests.exceptions.ConnectionError:
diff --git a/src/context/tests/context_report.xml b/src/context/tests/context_report.xml
deleted file mode 100644
index 5ee1c17cd6f59c58d55a5eba38de7ea0366a757c..0000000000000000000000000000000000000000
--- a/src/context/tests/context_report.xml
+++ /dev/null
@@ -1,1539 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="pytest" errors="0" failures="25" skipped="0" tests="51" time="8.764" timestamp="2022-07-29T09:29:23.786468" hostname="613b7e973910"><testcase classname="context.tests.test_unitary" name="test_grpc_context[all_inmemory]" time="0.028" /><testcase classname="context.tests.test_unitary" name="test_grpc_topology[all_inmemory]" time="0.026" /><testcase classname="context.tests.test_unitary" name="test_grpc_device[all_inmemory]" time="0.139" /><testcase classname="context.tests.test_unitary" name="test_grpc_link[all_inmemory]" time="0.139" /><testcase classname="context.tests.test_unitary" name="test_grpc_service[all_inmemory]" time="0.152" /><testcase classname="context.tests.test_unitary" name="test_grpc_connection[all_inmemory]" time="0.274" /><testcase classname="context.tests.test_unitary" name="test_rest_populate_database[all_inmemory]" time="0.093" /><testcase classname="context.tests.test_unitary" name="test_rest_get_context_ids[all_inmemory]" time="1.033" /><testcase classname="context.tests.test_unitary" name="test_rest_get_contexts[all_inmemory]" time="0.009" /><testcase classname="context.tests.test_unitary" name="test_rest_get_context[all_inmemory]" time="0.009" /><testcase classname="context.tests.test_unitary" name="test_rest_get_topology_ids[all_inmemory]" time="0.006" /><testcase classname="context.tests.test_unitary" name="test_rest_get_topologies[all_inmemory]" time="0.013" /><testcase classname="context.tests.test_unitary" name="test_rest_get_topology[all_inmemory]" time="0.012" /><testcase classname="context.tests.test_unitary" name="test_rest_get_service_ids[all_inmemory]" time="0.007" /><testcase classname="context.tests.test_unitary" name="test_rest_get_services[all_inmemory]" time="0.039" /><testcase classname="context.tests.test_unitary" name="test_rest_get_service[all_inmemory]" time="0.017" /><testcase classname="context.tests.test_unitary" name="test_rest_get_device_ids[all_inmemory]" time="0.005" /><testcase classname="context.tests.test_unitary" name="test_rest_get_devices[all_inmemory]" time="0.070" /><testcase classname="context.tests.test_unitary" name="test_rest_get_device[all_inmemory]" time="0.027" /><testcase classname="context.tests.test_unitary" name="test_rest_get_link_ids[all_inmemory]" time="0.005" /><testcase classname="context.tests.test_unitary" name="test_rest_get_links[all_inmemory]" time="0.023" /><testcase classname="context.tests.test_unitary" name="test_rest_get_link[all_inmemory]" time="0.011" /><testcase classname="context.tests.test_unitary" name="test_rest_get_connection_ids[all_inmemory]" time="0.007" /><testcase classname="context.tests.test_unitary" name="test_rest_get_connections[all_inmemory]" time="0.032" /><testcase classname="context.tests.test_unitary" name="test_rest_get_connection[all_inmemory]" time="0.032" /><testcase classname="context.tests.test_unitary" name="test_grpc_context[all_redis]" time="0.477"><failure message="redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.">self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-&gt;           sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:607: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = &lt;redis.retry.Retry object at 0x7fb0186487f0&gt;
-do = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb01a79edc0&gt;
-fail = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb0186538b0&gt;
-
-    def call_with_retry(self, do, fail):
-        """
-        Execute an operation that might fail and returns its result, or
-        raise the exception that was thrown depending on the `Backoff` object.
-        `do`: the operation to call. Expects no argument.
-        `fail`: the failure handler, expects the last error that was thrown
-        """
-        self._backoff.reset()
-        failures = 0
-        while True:
-            try:
-&gt;               return do()
-
-/usr/local/lib/python3.9/site-packages/redis/retry.py:45: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-&gt;       lambda: self._connect(), lambda error: self.disconnect(error)
-    )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:608: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-                sock.connect(socket_address)
-    
-                # set the socket_timeout now that we're connected
-                sock.settimeout(self.socket_timeout)
-                return sock
-    
-            except OSError as _:
-                err = _
-                if sock is not None:
-                    sock.close()
-    
-        if err is not None:
-&gt;           raise err
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:673: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-&gt;               sock.connect(socket_address)
-E               ConnectionRefusedError: [Errno 111] Connection refused
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:661: ConnectionRefusedError
-
-During handling of the above exception, another exception occurred:
-
-context_client_grpc = &lt;context.client.ContextClient.ContextClient object at 0x7fb018f15a30&gt;
-context_db_mb = (&lt;common.orm.Database.Database object at 0x7fb018f15910&gt;, &lt;common.message_broker.MessageBroker.MessageBroker object at 0x7fb018f15460&gt;)
-
-    def test_grpc_context(
-        context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-        context_database = context_db_mb[0]
-    
-        # ----- Clean the database -----------------------------------------------------------------------------------------
-&gt;       context_database.clear_all()
-
-context/tests/test_unitary.py:128: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-common/orm/Database.py:32: in clear_all
-    for key in self._backend.keys():
-common/orm/backend/redis/RedisBackend.py:48: in keys
-    return [k.decode('UTF-8') for k in self._client.keys()]
-/usr/local/lib/python3.9/site-packages/redis/commands/core.py:1370: in keys
-    return self.execute_command("KEYS", pattern, **kwargs)
-/usr/local/lib/python3.9/site-packages/redis/client.py:1173: in execute_command
-    conn = self.connection or pool.get_connection(command_name, **options)
-/usr/local/lib/python3.9/site-packages/redis/connection.py:1370: in get_connection
-    connection.connect()
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-            sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-        except socket.timeout:
-            raise TimeoutError("Timeout connecting to server")
-        except OSError as e:
-&gt;           raise ConnectionError(self._error_message(e))
-E           redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:613: ConnectionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_grpc_topology[all_redis]" time="0.002"><failure message="redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.">self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-&gt;           sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:607: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = &lt;redis.retry.Retry object at 0x7fb0186487f0&gt;
-do = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb018563b80&gt;
-fail = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb018587550&gt;
-
-    def call_with_retry(self, do, fail):
-        """
-        Execute an operation that might fail and returns its result, or
-        raise the exception that was thrown depending on the `Backoff` object.
-        `do`: the operation to call. Expects no argument.
-        `fail`: the failure handler, expects the last error that was thrown
-        """
-        self._backoff.reset()
-        failures = 0
-        while True:
-            try:
-&gt;               return do()
-
-/usr/local/lib/python3.9/site-packages/redis/retry.py:45: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-&gt;       lambda: self._connect(), lambda error: self.disconnect(error)
-    )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:608: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-                sock.connect(socket_address)
-    
-                # set the socket_timeout now that we're connected
-                sock.settimeout(self.socket_timeout)
-                return sock
-    
-            except OSError as _:
-                err = _
-                if sock is not None:
-                    sock.close()
-    
-        if err is not None:
-&gt;           raise err
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:673: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-&gt;               sock.connect(socket_address)
-E               ConnectionRefusedError: [Errno 111] Connection refused
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:661: ConnectionRefusedError
-
-During handling of the above exception, another exception occurred:
-
-context_client_grpc = &lt;context.client.ContextClient.ContextClient object at 0x7fb018f15a30&gt;
-context_db_mb = (&lt;common.orm.Database.Database object at 0x7fb018f15910&gt;, &lt;common.message_broker.MessageBroker.MessageBroker object at 0x7fb018f15460&gt;)
-
-    def test_grpc_topology(
-        context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-        context_database = context_db_mb[0]
-    
-        # ----- Clean the database -----------------------------------------------------------------------------------------
-&gt;       context_database.clear_all()
-
-context/tests/test_unitary.py:249: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-common/orm/Database.py:32: in clear_all
-    for key in self._backend.keys():
-common/orm/backend/redis/RedisBackend.py:48: in keys
-    return [k.decode('UTF-8') for k in self._client.keys()]
-/usr/local/lib/python3.9/site-packages/redis/commands/core.py:1370: in keys
-    return self.execute_command("KEYS", pattern, **kwargs)
-/usr/local/lib/python3.9/site-packages/redis/client.py:1173: in execute_command
-    conn = self.connection or pool.get_connection(command_name, **options)
-/usr/local/lib/python3.9/site-packages/redis/connection.py:1370: in get_connection
-    connection.connect()
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-            sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-        except socket.timeout:
-            raise TimeoutError("Timeout connecting to server")
-        except OSError as e:
-&gt;           raise ConnectionError(self._error_message(e))
-E           redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:613: ConnectionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_grpc_device[all_redis]" time="0.001"><failure message="redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.">self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-&gt;           sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:607: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = &lt;redis.retry.Retry object at 0x7fb0186487f0&gt;
-do = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb018683820&gt;
-fail = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb018653f70&gt;
-
-    def call_with_retry(self, do, fail):
-        """
-        Execute an operation that might fail and returns its result, or
-        raise the exception that was thrown depending on the `Backoff` object.
-        `do`: the operation to call. Expects no argument.
-        `fail`: the failure handler, expects the last error that was thrown
-        """
-        self._backoff.reset()
-        failures = 0
-        while True:
-            try:
-&gt;               return do()
-
-/usr/local/lib/python3.9/site-packages/redis/retry.py:45: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-&gt;       lambda: self._connect(), lambda error: self.disconnect(error)
-    )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:608: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-                sock.connect(socket_address)
-    
-                # set the socket_timeout now that we're connected
-                sock.settimeout(self.socket_timeout)
-                return sock
-    
-            except OSError as _:
-                err = _
-                if sock is not None:
-                    sock.close()
-    
-        if err is not None:
-&gt;           raise err
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:673: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-&gt;               sock.connect(socket_address)
-E               ConnectionRefusedError: [Errno 111] Connection refused
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:661: ConnectionRefusedError
-
-During handling of the above exception, another exception occurred:
-
-context_client_grpc = &lt;context.client.ContextClient.ContextClient object at 0x7fb018f15a30&gt;
-context_db_mb = (&lt;common.orm.Database.Database object at 0x7fb018f15910&gt;, &lt;common.message_broker.MessageBroker.MessageBroker object at 0x7fb018f15460&gt;)
-
-    def test_grpc_device(
-        context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-        context_database = context_db_mb[0]
-    
-        # ----- Clean the database -----------------------------------------------------------------------------------------
-&gt;       context_database.clear_all()
-
-context/tests/test_unitary.py:381: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-common/orm/Database.py:32: in clear_all
-    for key in self._backend.keys():
-common/orm/backend/redis/RedisBackend.py:48: in keys
-    return [k.decode('UTF-8') for k in self._client.keys()]
-/usr/local/lib/python3.9/site-packages/redis/commands/core.py:1370: in keys
-    return self.execute_command("KEYS", pattern, **kwargs)
-/usr/local/lib/python3.9/site-packages/redis/client.py:1173: in execute_command
-    conn = self.connection or pool.get_connection(command_name, **options)
-/usr/local/lib/python3.9/site-packages/redis/connection.py:1370: in get_connection
-    connection.connect()
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-            sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-        except socket.timeout:
-            raise TimeoutError("Timeout connecting to server")
-        except OSError as e:
-&gt;           raise ConnectionError(self._error_message(e))
-E           redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:613: ConnectionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_grpc_link[all_redis]" time="0.001"><failure message="redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.">self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-&gt;           sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:607: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = &lt;redis.retry.Retry object at 0x7fb0186487f0&gt;
-do = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb0186c0550&gt;
-fail = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb0186c0670&gt;
-
-    def call_with_retry(self, do, fail):
-        """
-        Execute an operation that might fail and returns its result, or
-        raise the exception that was thrown depending on the `Backoff` object.
-        `do`: the operation to call. Expects no argument.
-        `fail`: the failure handler, expects the last error that was thrown
-        """
-        self._backoff.reset()
-        failures = 0
-        while True:
-            try:
-&gt;               return do()
-
-/usr/local/lib/python3.9/site-packages/redis/retry.py:45: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-&gt;       lambda: self._connect(), lambda error: self.disconnect(error)
-    )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:608: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-                sock.connect(socket_address)
-    
-                # set the socket_timeout now that we're connected
-                sock.settimeout(self.socket_timeout)
-                return sock
-    
-            except OSError as _:
-                err = _
-                if sock is not None:
-                    sock.close()
-    
-        if err is not None:
-&gt;           raise err
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:673: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-&gt;               sock.connect(socket_address)
-E               ConnectionRefusedError: [Errno 111] Connection refused
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:661: ConnectionRefusedError
-
-During handling of the above exception, another exception occurred:
-
-context_client_grpc = &lt;context.client.ContextClient.ContextClient object at 0x7fb018f15a30&gt;
-context_db_mb = (&lt;common.orm.Database.Database object at 0x7fb018f15910&gt;, &lt;common.message_broker.MessageBroker.MessageBroker object at 0x7fb018f15460&gt;)
-
-    def test_grpc_link(
-        context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-        context_database = context_db_mb[0]
-    
-        # ----- Clean the database -----------------------------------------------------------------------------------------
-&gt;       context_database.clear_all()
-
-context/tests/test_unitary.py:556: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-common/orm/Database.py:32: in clear_all
-    for key in self._backend.keys():
-common/orm/backend/redis/RedisBackend.py:48: in keys
-    return [k.decode('UTF-8') for k in self._client.keys()]
-/usr/local/lib/python3.9/site-packages/redis/commands/core.py:1370: in keys
-    return self.execute_command("KEYS", pattern, **kwargs)
-/usr/local/lib/python3.9/site-packages/redis/client.py:1173: in execute_command
-    conn = self.connection or pool.get_connection(command_name, **options)
-/usr/local/lib/python3.9/site-packages/redis/connection.py:1370: in get_connection
-    connection.connect()
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-            sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-        except socket.timeout:
-            raise TimeoutError("Timeout connecting to server")
-        except OSError as e:
-&gt;           raise ConnectionError(self._error_message(e))
-E           redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:613: ConnectionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_grpc_service[all_redis]" time="0.001"><failure message="redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.">self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-&gt;           sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:607: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = &lt;redis.retry.Retry object at 0x7fb0186487f0&gt;
-do = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb018f2f700&gt;
-fail = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb0186188b0&gt;
-
-    def call_with_retry(self, do, fail):
-        """
-        Execute an operation that might fail and returns its result, or
-        raise the exception that was thrown depending on the `Backoff` object.
-        `do`: the operation to call. Expects no argument.
-        `fail`: the failure handler, expects the last error that was thrown
-        """
-        self._backoff.reset()
-        failures = 0
-        while True:
-            try:
-&gt;               return do()
-
-/usr/local/lib/python3.9/site-packages/redis/retry.py:45: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-&gt;       lambda: self._connect(), lambda error: self.disconnect(error)
-    )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:608: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-                sock.connect(socket_address)
-    
-                # set the socket_timeout now that we're connected
-                sock.settimeout(self.socket_timeout)
-                return sock
-    
-            except OSError as _:
-                err = _
-                if sock is not None:
-                    sock.close()
-    
-        if err is not None:
-&gt;           raise err
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:673: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-&gt;               sock.connect(socket_address)
-E               ConnectionRefusedError: [Errno 111] Connection refused
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:661: ConnectionRefusedError
-
-During handling of the above exception, another exception occurred:
-
-context_client_grpc = &lt;context.client.ContextClient.ContextClient object at 0x7fb018f15a30&gt;
-context_db_mb = (&lt;common.orm.Database.Database object at 0x7fb018f15910&gt;, &lt;common.message_broker.MessageBroker.MessageBroker object at 0x7fb018f15460&gt;)
-
-    def test_grpc_service(
-        context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-        context_database = context_db_mb[0]
-    
-        # ----- Clean the database -----------------------------------------------------------------------------------------
-&gt;       context_database.clear_all()
-
-context/tests/test_unitary.py:739: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-common/orm/Database.py:32: in clear_all
-    for key in self._backend.keys():
-common/orm/backend/redis/RedisBackend.py:48: in keys
-    return [k.decode('UTF-8') for k in self._client.keys()]
-/usr/local/lib/python3.9/site-packages/redis/commands/core.py:1370: in keys
-    return self.execute_command("KEYS", pattern, **kwargs)
-/usr/local/lib/python3.9/site-packages/redis/client.py:1173: in execute_command
-    conn = self.connection or pool.get_connection(command_name, **options)
-/usr/local/lib/python3.9/site-packages/redis/connection.py:1370: in get_connection
-    connection.connect()
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-            sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-        except socket.timeout:
-            raise TimeoutError("Timeout connecting to server")
-        except OSError as e:
-&gt;           raise ConnectionError(self._error_message(e))
-E           redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:613: ConnectionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_grpc_connection[all_redis]" time="0.001"><failure message="redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.">self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-&gt;           sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:607: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = &lt;redis.retry.Retry object at 0x7fb0186487f0&gt;
-do = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb0186a43a0&gt;
-fail = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb0186a4310&gt;
-
-    def call_with_retry(self, do, fail):
-        """
-        Execute an operation that might fail and returns its result, or
-        raise the exception that was thrown depending on the `Backoff` object.
-        `do`: the operation to call. Expects no argument.
-        `fail`: the failure handler, expects the last error that was thrown
-        """
-        self._backoff.reset()
-        failures = 0
-        while True:
-            try:
-&gt;               return do()
-
-/usr/local/lib/python3.9/site-packages/redis/retry.py:45: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-&gt;       lambda: self._connect(), lambda error: self.disconnect(error)
-    )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:608: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-                sock.connect(socket_address)
-    
-                # set the socket_timeout now that we're connected
-                sock.settimeout(self.socket_timeout)
-                return sock
-    
-            except OSError as _:
-                err = _
-                if sock is not None:
-                    sock.close()
-    
-        if err is not None:
-&gt;           raise err
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:673: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-&gt;               sock.connect(socket_address)
-E               ConnectionRefusedError: [Errno 111] Connection refused
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:661: ConnectionRefusedError
-
-During handling of the above exception, another exception occurred:
-
-context_client_grpc = &lt;context.client.ContextClient.ContextClient object at 0x7fb018f15a30&gt;
-context_db_mb = (&lt;common.orm.Database.Database object at 0x7fb018f15910&gt;, &lt;common.message_broker.MessageBroker.MessageBroker object at 0x7fb018f15460&gt;)
-
-    def test_grpc_connection(
-        context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-        context_database = context_db_mb[0]
-    
-        # ----- Clean the database -----------------------------------------------------------------------------------------
-&gt;       context_database.clear_all()
-
-context/tests/test_unitary.py:926: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-common/orm/Database.py:32: in clear_all
-    for key in self._backend.keys():
-common/orm/backend/redis/RedisBackend.py:48: in keys
-    return [k.decode('UTF-8') for k in self._client.keys()]
-/usr/local/lib/python3.9/site-packages/redis/commands/core.py:1370: in keys
-    return self.execute_command("KEYS", pattern, **kwargs)
-/usr/local/lib/python3.9/site-packages/redis/client.py:1173: in execute_command
-    conn = self.connection or pool.get_connection(command_name, **options)
-/usr/local/lib/python3.9/site-packages/redis/connection.py:1370: in get_connection
-    connection.connect()
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-            sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-        except socket.timeout:
-            raise TimeoutError("Timeout connecting to server")
-        except OSError as e:
-&gt;           raise ConnectionError(self._error_message(e))
-E           redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:613: ConnectionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_populate_database[all_redis]" time="0.001"><failure message="redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.">self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-&gt;           sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:607: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = &lt;redis.retry.Retry object at 0x7fb0186487f0&gt;
-do = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb0186d49d0&gt;
-fail = &lt;function Connection.connect.&lt;locals&gt;.&lt;lambda&gt; at 0x7fb0186d4280&gt;
-
-    def call_with_retry(self, do, fail):
-        """
-        Execute an operation that might fail and returns its result, or
-        raise the exception that was thrown depending on the `Backoff` object.
-        `do`: the operation to call. Expects no argument.
-        `fail`: the failure handler, expects the last error that was thrown
-        """
-        self._backoff.reset()
-        failures = 0
-        while True:
-            try:
-&gt;               return do()
-
-/usr/local/lib/python3.9/site-packages/redis/retry.py:45: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-&gt;       lambda: self._connect(), lambda error: self.disconnect(error)
-    )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:608: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-                sock.connect(socket_address)
-    
-                # set the socket_timeout now that we're connected
-                sock.settimeout(self.socket_timeout)
-                return sock
-    
-            except OSError as _:
-                err = _
-                if sock is not None:
-                    sock.close()
-    
-        if err is not None:
-&gt;           raise err
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:673: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-    
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-    
-                # connect
-&gt;               sock.connect(socket_address)
-E               ConnectionRefusedError: [Errno 111] Connection refused
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:661: ConnectionRefusedError
-
-During handling of the above exception, another exception occurred:
-
-context_db_mb = (&lt;common.orm.Database.Database object at 0x7fb018f15910&gt;, &lt;common.message_broker.MessageBroker.MessageBroker object at 0x7fb018f15460&gt;)
-context_service_grpc = &lt;context.service.grpc_server.ContextService.ContextService object at 0x7fb018f158e0&gt;
-
-    def test_rest_populate_database(
-        context_db_mb : Tuple[Database, MessageBroker], # pylint: disable=redefined-outer-name
-        context_service_grpc : ContextService           # pylint: disable=redefined-outer-name
-        ):
-        database = context_db_mb[0]
-&gt;       database.clear_all()
-
-context/tests/test_unitary.py:1179: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-common/orm/Database.py:32: in clear_all
-    for key in self._backend.keys():
-common/orm/backend/redis/RedisBackend.py:48: in keys
-    return [k.decode('UTF-8') for k in self._client.keys()]
-/usr/local/lib/python3.9/site-packages/redis/commands/core.py:1370: in keys
-    return self.execute_command("KEYS", pattern, **kwargs)
-/usr/local/lib/python3.9/site-packages/redis/client.py:1173: in execute_command
-    conn = self.connection or pool.get_connection(command_name, **options)
-/usr/local/lib/python3.9/site-packages/redis/connection.py:1370: in get_connection
-    connection.connect()
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = Connection&lt;host=127.0.0.1,port=6379,db=0&gt;
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-            sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-        except socket.timeout:
-            raise TimeoutError("Timeout connecting to server")
-        except OSError as e:
-&gt;           raise ConnectionError(self._error_message(e))
-E           redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:613: ConnectionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_context_ids[all_redis]" time="1.033"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-&gt;       reply = do_rest_request('/context_ids')
-
-context/tests/test_unitary.py:1183: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/context_ids'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_contexts[all_redis]" time="0.006"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-&gt;       reply = do_rest_request('/contexts')
-
-context/tests/test_unitary.py:1187: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/contexts'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_context[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-&gt;       reply = do_rest_request('/context/{:s}'.format(context_uuid))
-
-context/tests/test_unitary.py:1192: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/context/admin'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_topology_ids[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-&gt;       reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid))
-
-context/tests/test_unitary.py:1197: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/context/admin/topology_ids'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_topologies[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-&gt;       reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid))
-
-context/tests/test_unitary.py:1202: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/context/admin/topologies'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_topology[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-        topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_UUID)
-&gt;       reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid))
-
-context/tests/test_unitary.py:1208: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/context/admin/topology/admin'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_service_ids[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-&gt;       reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid))
-
-context/tests/test_unitary.py:1213: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/context/admin/service_ids'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_services[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-&gt;       reply = do_rest_request('/context/{:s}/services'.format(context_uuid))
-
-context/tests/test_unitary.py:1218: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/context/admin/services'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_service[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-        service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='')
-&gt;       reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid))
-
-context/tests/test_unitary.py:1224: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/context/admin/service/SVC%3AR1%2FEP100-R2%2FEP100'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_device_ids[all_redis]" time="0.006"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-&gt;       reply = do_rest_request('/device_ids')
-
-context/tests/test_unitary.py:1228: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/device_ids'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_devices[all_redis]" time="0.006"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_devices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-&gt;       reply = do_rest_request('/devices')
-
-context/tests/test_unitary.py:1232: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/devices'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_device[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_device(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        device_uuid = urllib.parse.quote(DEVICE_R1_UUID, safe='')
-&gt;       reply = do_rest_request('/device/{:s}'.format(device_uuid))
-
-context/tests/test_unitary.py:1237: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/device/R1'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_link_ids[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_link_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-&gt;       reply = do_rest_request('/link_ids')
-
-context/tests/test_unitary.py:1241: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/link_ids'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_links[all_redis]" time="0.006"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_links(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-&gt;       reply = do_rest_request('/links')
-
-context/tests/test_unitary.py:1245: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/links'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_link[all_redis]" time="0.008"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        link_uuid = urllib.parse.quote(LINK_R1_R2_UUID, safe='')
-&gt;       reply = do_rest_request('/link/{:s}'.format(link_uuid))
-
-context/tests/test_unitary.py:1250: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/link/R1%2FEP2-R2%2FEP1'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_connection_ids[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-        service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='')
-&gt;       reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid))
-
-context/tests/test_unitary.py:1256: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/context/admin/service/SVC%3AR1%2FEP100-R3%2FEP100/connection_ids'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_connections[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-        service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='')
-&gt;       reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid))
-
-context/tests/test_unitary.py:1262: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/context/admin/service/SVC%3AR1%2FEP100-R3%2FEP100/connections'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_rest_get_connection[all_redis]" time="0.007"><failure message="AssertionError: Reply failed with code 500&#10;assert 500 == 200&#10;  +500&#10;  -200">context_service_rest = &lt;RestServer(Thread-71, started daemon 140392926267136)&gt;
-
-    def test_rest_get_connection(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        connection_uuid = urllib.parse.quote(CONNECTION_R1_R3_UUID, safe='')
-&gt;       reply = do_rest_request('/connection/{:s}'.format(connection_uuid))
-
-context/tests/test_unitary.py:1267: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-url = '/connection/CON%3AR1%2FEP100-R3%2FEP100'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-&gt;       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionError</failure></testcase><testcase classname="context.tests.test_unitary" name="test_tools_fast_string_hasher" time="0.423" /></testsuite></testsuites>
\ No newline at end of file
diff --git a/src/device/requirements.in b/src/device/requirements.in
index 10506fbd42c5b7a64afb3cc7c6ea32e0f1fa49f6..9c8c0ef18f3bcd4a92180465d11cd465c4336d44 100644
--- a/src/device/requirements.in
+++ b/src/device/requirements.in
@@ -10,6 +10,9 @@ pytz==2021.3
 redis==4.1.2
 requests==2.27.1
 xmltodict==0.12.0
+tabulate
+ipaddress
+macaddress
 
 # pip's dependency resolver does not take into account installed packages.
 # p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install latest one
diff --git a/src/device/service/DeviceService.py b/src/device/service/DeviceService.py
index 4dc2b01000d8ca6dd2b3ecee0b0f867338636c73..59134f26d3dd8c3fa0a9dddbcd1d3df298ec076a 100644
--- a/src/device/service/DeviceService.py
+++ b/src/device/service/DeviceService.py
@@ -23,10 +23,15 @@ from .driver_api.DriverInstanceCache import DriverInstanceCache
 from .DeviceServiceServicerImpl import DeviceServiceServicerImpl
 from .MonitoringLoops import MonitoringLoops
 
+# Custom gRPC settings
+# Multiple clients might keep connections alive waiting for RPC methods to be executed.
+# Requests needs to be serialized to ensure correct device configurations
+GRPC_MAX_WORKERS = 200
+
 class DeviceService(GenericGrpcService):
     def __init__(self, driver_instance_cache : DriverInstanceCache, cls_name: str = __name__) -> None:
         port = get_service_port_grpc(ServiceNameEnum.DEVICE)
-        super().__init__(port, cls_name=cls_name)
+        super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name)
         database = Database(get_database_backend(backend=BackendEnum.INMEMORY))
         self.monitoring_loops = MonitoringLoops(database)
         self.device_servicer = DeviceServiceServicerImpl(database, driver_instance_cache, self.monitoring_loops)
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
index 6189816bcd35dd973e4a7da389f256bdb685a79f..d5d44f34ffb69a337b715a0884aea3770b3d3cec 100644
--- a/src/device/service/DeviceServiceServicerImpl.py
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -24,6 +24,7 @@ from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
 from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, OperationFailedException
 from common.tools.grpc.Tools import grpc_message_to_json
+from common.tools.mutex_queues.MutexQueues import MutexQueues
 from context.client.ContextClient import ContextClient
 from .database.ConfigModel import (
     ConfigModel, ConfigRuleModel, ORM_ConfigActionEnum, get_config_rules, grpc_config_rules_to_raw, update_config)
@@ -56,6 +57,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
         self.database = database
         self.driver_instance_cache = driver_instance_cache
         self.monitoring_loops = monitoring_loops
+        self.mutex_queues = MutexQueues()
         LOGGER.debug('Servicer Created')
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
@@ -101,348 +103,368 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
         json_request['device_config'] = {}
         request = Device(**json_request)
 
-        sync_device_from_context(device_uuid, self.context_client, self.database)
-        db_device,_ = update_device_in_local_database(self.database, request)
-
-        driver_filter_fields = get_device_driver_filter_fields(db_device)
-
-        #LOGGER.info('[AddDevice] connection_config_rules = {:s}'.format(str(connection_config_rules)))
-        address  = connection_config_rules.pop('address', None)
-        port     = connection_config_rules.pop('port', None)
-        settings = connection_config_rules.pop('settings', '{}')
+        self.mutex_queues.wait_my_turn(device_uuid)
         try:
-            settings = json.loads(settings)
-        except ValueError as e:
-            raise InvalidArgumentException(
-                'device.device_config.config_rules[settings]', settings,
-                extra_details='_connect/settings Config Rules provided cannot be decoded as JSON dictionary.') from e
-        driver : _Driver = self.driver_instance_cache.get(
-            device_uuid, filter_fields=driver_filter_fields, address=address, port=port, settings=settings)
-        driver.Connect()
-
-        endpoints = driver.GetConfig([RESOURCE_ENDPOINTS])
-        try:
-            for resource_key, resource_value in endpoints:
+            sync_device_from_context(device_uuid, self.context_client, self.database)
+            db_device,_ = update_device_in_local_database(self.database, request)
+
+            driver_filter_fields = get_device_driver_filter_fields(db_device)
+
+            #LOGGER.info('[AddDevice] connection_config_rules = {:s}'.format(str(connection_config_rules)))
+            address  = connection_config_rules.pop('address', None)
+            port     = connection_config_rules.pop('port', None)
+            settings = connection_config_rules.pop('settings', '{}')
+            try:
+                settings = json.loads(settings)
+            except ValueError as e:
+                raise InvalidArgumentException(
+                    'device.device_config.config_rules[settings]', settings,
+                    extra_details='_connect/settings Config Rules provided cannot be decoded as JSON dictionary.') from e
+            driver : _Driver = self.driver_instance_cache.get(
+                device_uuid, filter_fields=driver_filter_fields, address=address, port=port, settings=settings)
+            driver.Connect()
+
+            endpoints = driver.GetConfig([RESOURCE_ENDPOINTS])
+            try:
+                for resource_key, resource_value in endpoints:
+                    if isinstance(resource_value, Exception):
+                        LOGGER.error('Error retrieving "{:s}": {:s}'.format(str(RESOURCE_ENDPOINTS), str(resource_value)))
+                        continue
+                    endpoint_uuid = resource_value.get('uuid')
+                    endpoint_type = resource_value.get('type')
+                    str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
+                    db_endpoint, _ = update_or_create_object(
+                        self.database, EndPointModel, str_endpoint_key, {
+                        'device_fk'    : db_device,
+                        'endpoint_uuid': endpoint_uuid,
+                        'endpoint_type': endpoint_type,
+                        'resource_key' : resource_key,
+                    })
+                    sample_types : Dict[int, str] = resource_value.get('sample_types', {})
+                    for sample_type, monitor_resource_key in sample_types.items():
+                        str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
+                        update_or_create_object(self.database, EndPointMonitorModel, str_endpoint_monitor_key, {
+                            'endpoint_fk'    : db_endpoint,
+                            'resource_key'   : monitor_resource_key,
+                            'kpi_sample_type': grpc_to_enum__kpi_sample_type(sample_type),
+                        })
+            except: # pylint: disable=bare-except
+                LOGGER.exception('[AddDevice] endpoints = {:s}'.format(str(endpoints)))
+
+            raw_running_config_rules = driver.GetConfig()
+            running_config_rules = []
+            for resource_key, resource_value in raw_running_config_rules:
                 if isinstance(resource_value, Exception):
-                    LOGGER.error('Error retrieving "{:s}": {:s}'.format(str(RESOURCE_ENDPOINTS), str(resource_value)))
+                    msg = 'Error retrieving config rules: {:s} => {:s}'
+                    LOGGER.error(msg.format(str(resource_key), str(resource_value)))
                     continue
-                endpoint_uuid = resource_value.get('uuid')
-                endpoint_type = resource_value.get('type')
-                str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
-                db_endpoint, _ = update_or_create_object(
-                    self.database, EndPointModel, str_endpoint_key, {
-                    'device_fk'    : db_device,
-                    'endpoint_uuid': endpoint_uuid,
-                    'endpoint_type': endpoint_type,
-                    'resource_key' : resource_key,
-                })
-                sample_types : Dict[int, str] = resource_value.get('sample_types', {})
-                for sample_type, monitor_resource_key in sample_types.items():
-                    str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
-                    update_or_create_object(self.database, EndPointMonitorModel, str_endpoint_monitor_key, {
-                        'endpoint_fk'    : db_endpoint,
-                        'resource_key'   : monitor_resource_key,
-                        'kpi_sample_type': grpc_to_enum__kpi_sample_type(sample_type),
-                    })
-        except: # pylint: disable=bare-except
-            LOGGER.exception('[AddDevice] endpoints = {:s}'.format(str(endpoints)))
-
-        raw_running_config_rules = driver.GetConfig()
-        running_config_rules = []
-        for resource_key, resource_value in raw_running_config_rules:
-            if isinstance(resource_value, Exception):
-                msg = 'Error retrieving config rules: {:s} => {:s}'
-                LOGGER.error(msg.format(str(resource_key), str(resource_value)))
-                continue
-            config_rule = (ORM_ConfigActionEnum.SET, resource_key, json.dumps(resource_value, sort_keys=True))
-            running_config_rules.append(config_rule)
+                config_rule = (ORM_ConfigActionEnum.SET, resource_key, json.dumps(resource_value, sort_keys=True))
+                running_config_rules.append(config_rule)
 
-        #for running_config_rule in running_config_rules:
-        #    LOGGER.info('[AddDevice] running_config_rule: {:s}'.format(str(running_config_rule)))
-        update_config(self.database, device_uuid, 'running', running_config_rules)
+            #for running_config_rule in running_config_rules:
+            #    LOGGER.info('[AddDevice] running_config_rule: {:s}'.format(str(running_config_rule)))
+            update_config(self.database, device_uuid, 'running', running_config_rules)
 
-        initial_config_rules = driver.GetInitialConfig()
-        update_config(self.database, device_uuid, 'initial', initial_config_rules)
+            initial_config_rules = driver.GetInitialConfig()
+            update_config(self.database, device_uuid, 'initial', initial_config_rules)
 
-        #LOGGER.info('[AddDevice] db_device = {:s}'.format(str(db_device.dump(
-        #    include_config_rules=True, include_drivers=True, include_endpoints=True))))
+            #LOGGER.info('[AddDevice] db_device = {:s}'.format(str(db_device.dump(
+            #    include_config_rules=True, include_drivers=True, include_endpoints=True))))
 
-        sync_device_to_context(db_device, self.context_client)
-        return DeviceId(**db_device.dump_id())
+            sync_device_to_context(db_device, self.context_client)
+            return DeviceId(**db_device.dump_id())
+        finally:
+            self.mutex_queues.signal_done(device_uuid)
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def ConfigureDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId:
         device_id = request.device_id
         device_uuid = device_id.device_uuid.uuid
 
-        sync_device_from_context(device_uuid, self.context_client, self.database)
+        self.mutex_queues.wait_my_turn(device_uuid)
+        try:
+            sync_device_from_context(device_uuid, self.context_client, self.database)
 
-        context_config_rules = get_config_rules(self.database, device_uuid, 'running')
-        context_config_rules = {config_rule[1]: config_rule[2] for config_rule in context_config_rules}
-        #LOGGER.info('[ConfigureDevice] context_config_rules = {:s}'.format(str(context_config_rules)))
+            context_config_rules = get_config_rules(self.database, device_uuid, 'running')
+            context_config_rules = {config_rule[1]: config_rule[2] for config_rule in context_config_rules}
+            #LOGGER.info('[ConfigureDevice] context_config_rules = {:s}'.format(str(context_config_rules)))
 
-        db_device,_ = update_device_in_local_database(self.database, request)
+            db_device,_ = update_device_in_local_database(self.database, request)
 
-        request_config_rules = grpc_config_rules_to_raw(request.device_config.config_rules)
-        #LOGGER.info('[ConfigureDevice] request_config_rules = {:s}'.format(str(request_config_rules)))
+            request_config_rules = grpc_config_rules_to_raw(request.device_config.config_rules)
+            #LOGGER.info('[ConfigureDevice] request_config_rules = {:s}'.format(str(request_config_rules)))
 
-        resources_to_set    : List[Tuple[str, Any]] = [] # key, value
-        resources_to_delete : List[Tuple[str, Any]] = [] # key, value
+            resources_to_set    : List[Tuple[str, Any]] = [] # key, value
+            resources_to_delete : List[Tuple[str, Any]] = [] # key, value
 
-        for config_rule in request_config_rules:
-            action, key, value = config_rule
-            if action == ORM_ConfigActionEnum.SET:
-                if (key not in context_config_rules) or (context_config_rules[key] != value):
-                    resources_to_set.append((key, value))
-            elif action == ORM_ConfigActionEnum.DELETE:
-                if key in context_config_rules:
-                    resources_to_delete.append((key, value))
+            for config_rule in request_config_rules:
+                action, key, value = config_rule
+                if action == ORM_ConfigActionEnum.SET:
+                    if (key not in context_config_rules) or (context_config_rules[key] != value):
+                        resources_to_set.append((key, value))
+                elif action == ORM_ConfigActionEnum.DELETE:
+                    if key in context_config_rules:
+                        resources_to_delete.append((key, value))
 
-        #LOGGER.info('[ConfigureDevice] resources_to_set = {:s}'.format(str(resources_to_set)))
-        #LOGGER.info('[ConfigureDevice] resources_to_delete = {:s}'.format(str(resources_to_delete)))
+            #LOGGER.info('[ConfigureDevice] resources_to_set = {:s}'.format(str(resources_to_set)))
+            #LOGGER.info('[ConfigureDevice] resources_to_delete = {:s}'.format(str(resources_to_delete)))
 
-        # TODO: use of datastores (might be virtual ones) to enable rollbacks
+            # TODO: use of datastores (might be virtual ones) to enable rollbacks
 
-        errors = []
+            errors = []
 
-        driver : _Driver = self.driver_instance_cache.get(device_uuid)
-        if driver is None:
-            errors.append('Device({:s}) has not been added to this Device instance'.format(str(device_uuid)))
+            driver : _Driver = self.driver_instance_cache.get(device_uuid)
+            if driver is None:
+                errors.append('Device({:s}) has not been added to this Device instance'.format(str(device_uuid)))
+
+            if len(errors) == 0:
+                results_setconfig = driver.SetConfig(resources_to_set)
+                errors.extend(check_set_errors(resources_to_set, results_setconfig))
 
-        if len(errors) == 0:
-            results_setconfig = driver.SetConfig(resources_to_set)
-            errors.extend(check_set_errors(resources_to_set, results_setconfig))
+            if len(errors) == 0:
+                results_deleteconfig = driver.DeleteConfig(resources_to_delete)
+                errors.extend(check_delete_errors(resources_to_delete, results_deleteconfig))
 
-        if len(errors) == 0:
-            results_deleteconfig = driver.DeleteConfig(resources_to_delete)
-            errors.extend(check_delete_errors(resources_to_delete, results_deleteconfig))
+            if len(errors) > 0:
+                raise OperationFailedException('ConfigureDevice', extra_details=errors)
 
-        if len(errors) > 0:
-            raise OperationFailedException('ConfigureDevice', extra_details=errors)
+            running_config_rules = driver.GetConfig()
+            running_config_rules = [
+                (ORM_ConfigActionEnum.SET, config_rule[0], json.dumps(config_rule[1], sort_keys=True))
+                for config_rule in running_config_rules if not isinstance(config_rule[1], Exception)
+            ]
+            #for running_config_rule in running_config_rules:
+            #    LOGGER.info('[ConfigureDevice] running_config_rule: {:s}'.format(str(running_config_rule)))
+            update_config(self.database, device_uuid, 'running', running_config_rules)
 
-        running_config_rules = driver.GetConfig()
-        running_config_rules = [
-            (ORM_ConfigActionEnum.SET, config_rule[0], json.dumps(config_rule[1], sort_keys=True))
-            for config_rule in running_config_rules
-        ]
-        #for running_config_rule in running_config_rules:
-        #    LOGGER.info('[ConfigureDevice] running_config_rule: {:s}'.format(str(running_config_rule)))
-        update_config(self.database, device_uuid, 'running', running_config_rules)
+            sync_device_to_context(db_device, self.context_client)
+            return DeviceId(**db_device.dump_id())
+        finally:
+            self.mutex_queues.signal_done(device_uuid)
 
-        sync_device_to_context(db_device, self.context_client)
-        return DeviceId(**db_device.dump_id())
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def DeleteDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty:
         device_uuid = request.device_uuid.uuid
 
-        self.monitoring_loops.remove(device_uuid)
+        self.mutex_queues.wait_my_turn(device_uuid)
+        try:
+            self.monitoring_loops.remove(device_uuid)
 
-        sync_device_from_context(device_uuid, self.context_client, self.database)
-        db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
-        if db_device is None: return Empty()
+            sync_device_from_context(device_uuid, self.context_client, self.database)
+            db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
+            if db_device is None: return Empty()
 
-        self.driver_instance_cache.delete(device_uuid)
-        delete_device_from_context(db_device, self.context_client)
+            self.driver_instance_cache.delete(device_uuid)
+            delete_device_from_context(db_device, self.context_client)
 
-        for db_kpi_pk,_ in db_device.references(KpiModel):
-            db_kpi = get_object(self.database, KpiModel, db_kpi_pk)
-            for db_endpoint_monitor_kpi_pk,_ in db_kpi.references(EndPointMonitorKpiModel):
-                get_object(self.database, EndPointMonitorKpiModel, db_endpoint_monitor_kpi_pk).delete()
-            db_kpi.delete()
+            for db_kpi_pk,_ in db_device.references(KpiModel):
+                db_kpi = get_object(self.database, KpiModel, db_kpi_pk)
+                for db_endpoint_monitor_kpi_pk,_ in db_kpi.references(EndPointMonitorKpiModel):
+                    get_object(self.database, EndPointMonitorKpiModel, db_endpoint_monitor_kpi_pk).delete()
+                db_kpi.delete()
 
-        for db_endpoint_pk,_ in db_device.references(EndPointModel):
-            db_endpoint = EndPointModel(self.database, db_endpoint_pk)
-            for db_endpoint_monitor_pk,_ in db_endpoint.references(EndPointMonitorModel):
-                get_object(self.database, EndPointMonitorModel, db_endpoint_monitor_pk).delete()
-            db_endpoint.delete()
+            for db_endpoint_pk,_ in db_device.references(EndPointModel):
+                db_endpoint = EndPointModel(self.database, db_endpoint_pk)
+                for db_endpoint_monitor_pk,_ in db_endpoint.references(EndPointMonitorModel):
+                    get_object(self.database, EndPointMonitorModel, db_endpoint_monitor_pk).delete()
+                db_endpoint.delete()
 
-        for db_driver_pk,_ in db_device.references(DriverModel):
-            get_object(self.database, DriverModel, db_driver_pk).delete()
+            for db_driver_pk,_ in db_device.references(DriverModel):
+                get_object(self.database, DriverModel, db_driver_pk).delete()
 
-        db_initial_config = ConfigModel(self.database, db_device.device_initial_config_fk)
-        for db_config_rule_pk,_ in db_initial_config.references(ConfigRuleModel):
-            get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete()
+            db_initial_config = ConfigModel(self.database, db_device.device_initial_config_fk)
+            for db_config_rule_pk,_ in db_initial_config.references(ConfigRuleModel):
+                get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete()
 
-        db_running_config = ConfigModel(self.database, db_device.device_running_config_fk)
-        for db_config_rule_pk,_ in db_running_config.references(ConfigRuleModel):
-            get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete()
+            db_running_config = ConfigModel(self.database, db_device.device_running_config_fk)
+            for db_config_rule_pk,_ in db_running_config.references(ConfigRuleModel):
+                get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete()
 
-        db_device.delete()
-        db_initial_config.delete()
-        db_running_config.delete()
-        return Empty()
+            db_device.delete()
+            db_initial_config.delete()
+            db_running_config.delete()
+            return Empty()
+        finally:
+            self.mutex_queues.signal_done(device_uuid)
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def GetInitialConfig(self, request : DeviceId, context : grpc.ServicerContext) -> DeviceConfig:
         device_uuid = request.device_uuid.uuid
 
-        sync_device_from_context(device_uuid, self.context_client, self.database)
-        db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
+        self.mutex_queues.wait_my_turn(device_uuid)
+        try:
+            sync_device_from_context(device_uuid, self.context_client, self.database)
+            db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
 
-        config_rules = {} if db_device is None else db_device.dump_initial_config()
-        return DeviceConfig(config_rules=config_rules)
+            config_rules = {} if db_device is None else db_device.dump_initial_config()
+            device_config = DeviceConfig(config_rules=config_rules)
+            return device_config
+        finally:
+            self.mutex_queues.signal_done(device_uuid)
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def MonitorDeviceKpi(self, request : MonitoringSettings, context : grpc.ServicerContext) -> Empty:
         kpi_uuid = request.kpi_id.kpi_id.uuid
+        device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid
+        self.mutex_queues.wait_my_turn(device_uuid)
+        try:
+            subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0)
+            if subscribe:
+                db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
+                if db_device is None:
+                    msg = 'Device({:s}) has not been added to this Device instance.'.format(str(device_uuid))
+                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+
+                endpoint_id = request.kpi_descriptor.endpoint_id
+                endpoint_uuid = endpoint_id.endpoint_uuid.uuid
+                str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
+                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+                endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid
+                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
+                    str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
+                    str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
+                db_endpoint : EndPointModel = get_object(
+                    self.database, EndPointModel, str_endpoint_key, raise_if_not_found=False)
+                if db_endpoint is None:
+                    msg = 'Device({:s})/EndPoint({:s}) not found. EndPointKey({:s})'.format(
+                        str(device_uuid), str(endpoint_uuid), str(str_endpoint_key))
+                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+
+                driver : _Driver = self.driver_instance_cache.get(device_uuid)
+                if driver is None:
+                    msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid))
+                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+
+                sample_type = request.kpi_descriptor.kpi_sample_type
+
+                attributes = {
+                    'kpi_uuid'         : request.kpi_id.kpi_id.uuid,
+                    'kpi_description'  : request.kpi_descriptor.kpi_description,
+                    'kpi_sample_type'  : grpc_to_enum__kpi_sample_type(sample_type),
+                    'device_fk'        : db_device,
+                    'endpoint_fk'      : db_endpoint,
+                    'sampling_duration': request.sampling_duration_s,
+                    'sampling_interval': request.sampling_interval_s,
+                }
+                result : Tuple[KpiModel, bool] = update_or_create_object(self.database, KpiModel, kpi_uuid, attributes)
+                db_kpi, updated = result
+
+                str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
+                db_endpoint_monitor : EndPointMonitorModel = get_object(
+                    self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False)
+                if db_endpoint_monitor is None:
+                    msg = 'SampleType({:s}/{:s}) not supported for Device({:s})/EndPoint({:s}).'.format(
+                        str(sample_type), str(KpiSampleType.Name(sample_type).upper().replace('KPISAMPLETYPE_', '')),
+                        str(device_uuid), str(endpoint_uuid))
+                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+
+                endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key)
+                str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
+                attributes = {
+                    'endpoint_monitor_fk': db_endpoint_monitor,
+                    'kpi_fk'             : db_kpi,
+                }
+                result : Tuple[EndPointMonitorKpiModel, bool] = update_or_create_object(
+                    self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, attributes)
+                db_endpoint_monitor_kpi, updated = result
+
+                resources_to_subscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval
+                resources_to_subscribe.append(
+                    (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval))
+                results_subscribestate = driver.SubscribeState(resources_to_subscribe)
+                errors = check_subscribe_errors(resources_to_subscribe, results_subscribestate)
+                if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
+
+                self.monitoring_loops.add(device_uuid, driver)
 
-        subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0)
-        if subscribe:
-            device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid
-
-            db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
-            if db_device is None:
-                msg = 'Device({:s}) has not been added to this Device instance.'.format(str(device_uuid))
-                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-            endpoint_id = request.kpi_descriptor.endpoint_id
-            endpoint_uuid = endpoint_id.endpoint_uuid.uuid
-            str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
-            endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-            endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid
-            if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-                str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-            db_endpoint : EndPointModel = get_object(
-                self.database, EndPointModel, str_endpoint_key, raise_if_not_found=False)
-            if db_endpoint is None:
-                msg = 'Device({:s})/EndPoint({:s}) not found. EndPointKey({:s})'.format(
-                    str(device_uuid), str(endpoint_uuid), str(str_endpoint_key))
-                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-            driver : _Driver = self.driver_instance_cache.get(device_uuid)
-            if driver is None:
-                msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid))
-                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-            sample_type = request.kpi_descriptor.kpi_sample_type
-
-            attributes = {
-                'kpi_uuid'         : request.kpi_id.kpi_id.uuid,
-                'kpi_description'  : request.kpi_descriptor.kpi_description,
-                'kpi_sample_type'  : grpc_to_enum__kpi_sample_type(sample_type),
-                'device_fk'        : db_device,
-                'endpoint_fk'      : db_endpoint,
-                'sampling_duration': request.sampling_duration_s,
-                'sampling_interval': request.sampling_interval_s,
-            }
-            result : Tuple[KpiModel, bool] = update_or_create_object(self.database, KpiModel, kpi_uuid, attributes)
-            db_kpi, updated = result
-
-            str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
-            db_endpoint_monitor : EndPointMonitorModel = get_object(
-                self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False)
-            if db_endpoint_monitor is None:
-                msg = 'SampleType({:s}/{:s}) not supported for Device({:s})/EndPoint({:s}).'.format(
-                    str(sample_type), str(KpiSampleType.Name(sample_type).upper().replace('KPISAMPLETYPE_', '')),
-                    str(device_uuid), str(endpoint_uuid))
-                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-            endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key)
-            str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
-            attributes = {
-                'endpoint_monitor_fk': db_endpoint_monitor,
-                'kpi_fk'             : db_kpi,
-            }
-            result : Tuple[EndPointMonitorKpiModel, bool] = update_or_create_object(
-                self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, attributes)
-            db_endpoint_monitor_kpi, updated = result
-
-            resources_to_subscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval
-            resources_to_subscribe.append(
-                (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval))
-            results_subscribestate = driver.SubscribeState(resources_to_subscribe)
-            errors = check_subscribe_errors(resources_to_subscribe, results_subscribestate)
-            if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
-
-            self.monitoring_loops.add(device_uuid, driver)
-
-        else:
-            db_kpi : KpiModel = get_object(
-                self.database, KpiModel, kpi_uuid, raise_if_not_found=False)
-            if db_kpi is None:
-                msg = 'Kpi({:s}) not found'.format(str(kpi_uuid))
-                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-            db_device : DeviceModel = get_object(
-                self.database, DeviceModel, db_kpi.device_fk, raise_if_not_found=False)
-            if db_device is None:
-                msg = 'Device({:s}) not found'.format(str(db_kpi.device_fk))
-                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-            device_uuid = db_device.device_uuid
-
-            db_endpoint : EndPointModel = get_object(
-                self.database, EndPointModel, db_kpi.endpoint_fk, raise_if_not_found=False)
-            if db_endpoint is None:
-                msg = 'EndPoint({:s}) not found'.format(str(db_kpi.endpoint_fk))
-                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-            endpoint_uuid = db_endpoint.endpoint_uuid
-            str_endpoint_key = db_endpoint.pk
-
-            kpi_sample_type : ORM_KpiSampleTypeEnum = db_kpi.kpi_sample_type
-            sample_type = kpi_sample_type.value
-            str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
-            db_endpoint_monitor : EndPointMonitorModel = get_object(
-                self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False)
-            if db_endpoint_monitor is None:
-                msg = 'EndPointMonitor({:s}) not found.'.format(str(str_endpoint_monitor_key))
-                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-            endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key)
-            str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
-            db_endpoint_monitor_kpi : EndPointMonitorKpiModel = get_object(
-                self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, raise_if_not_found=False)
-            if db_endpoint_monitor_kpi is None:
-                msg = 'EndPointMonitorKpi({:s}) not found.'.format(str(str_endpoint_monitor_kpi_key))
-                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-            resources_to_unsubscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval
-            resources_to_unsubscribe.append(
-                (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval))
-
-            driver : _Driver = self.driver_instance_cache.get(device_uuid)
-            if driver is None:
-                msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid))
-                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-            results_unsubscribestate = driver.UnsubscribeState(resources_to_unsubscribe)
-            errors = check_unsubscribe_errors(resources_to_unsubscribe, results_unsubscribestate)
-            if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
-
-            db_endpoint_monitor_kpi.delete()
-            db_kpi.delete()
-
-            # There is one monitoring loop per device; keep them active since they are re-used by different monitoring
-            # requests.
-            #self.monitoring_loops.remove(device_uuid)
-
-        # Subscriptions are not stored as classical driver config.
-        # TODO: consider adding it somehow in the configuration.
-        # Warning: GetConfig might be very slow in OpenConfig devices
-        #running_config_rules = [
-        #    (config_rule[0], json.dumps(config_rule[1], sort_keys=True))
-        #    for config_rule in driver.GetConfig()
-        #]
-        #context_config_rules = {
-        #    config_rule[1]: config_rule[2]
-        #    for config_rule in get_config_rules(self.database, device_uuid, 'running')
-        #}
-
-        ## each in context, not in running => delete in context
-        ## each in running, not in context => add to context
-        ## each in context and in running, context.value != running.value => update in context
-        #running_config_rules_actions : List[Tuple[ORM_ConfigActionEnum, str, str]] = []
-        #for config_rule_key,config_rule_value in running_config_rules:
-        #    running_config_rules_actions.append((ORM_ConfigActionEnum.SET, config_rule_key, config_rule_value))
-        #    context_config_rules.pop(config_rule_key, None)
-        #for context_rule_key,context_rule_value in context_config_rules.items():
-        #    running_config_rules_actions.append((ORM_ConfigActionEnum.DELETE, context_rule_key, context_rule_value))
-
-        ##msg = '[MonitorDeviceKpi] running_config_rules_action[{:d}]: {:s}'
-        ##for i,running_config_rules_action in enumerate(running_config_rules_actions):
-        ##    LOGGER.info(msg.format(i, str(running_config_rules_action)))
-        #update_config(self.database, device_uuid, 'running', running_config_rules_actions)
-
-        sync_device_to_context(db_device, self.context_client)
-        return Empty()
+            else:
+                db_kpi : KpiModel = get_object(
+                    self.database, KpiModel, kpi_uuid, raise_if_not_found=False)
+                if db_kpi is None:
+                    msg = 'Kpi({:s}) not found'.format(str(kpi_uuid))
+                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+
+                db_device : DeviceModel = get_object(
+                    self.database, DeviceModel, db_kpi.device_fk, raise_if_not_found=False)
+                if db_device is None:
+                    msg = 'Device({:s}) not found'.format(str(db_kpi.device_fk))
+                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+                device_uuid = db_device.device_uuid
+
+                db_endpoint : EndPointModel = get_object(
+                    self.database, EndPointModel, db_kpi.endpoint_fk, raise_if_not_found=False)
+                if db_endpoint is None:
+                    msg = 'EndPoint({:s}) not found'.format(str(db_kpi.endpoint_fk))
+                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+                endpoint_uuid = db_endpoint.endpoint_uuid
+                str_endpoint_key = db_endpoint.pk
+
+                kpi_sample_type : ORM_KpiSampleTypeEnum = db_kpi.kpi_sample_type
+                sample_type = kpi_sample_type.value
+                str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
+                db_endpoint_monitor : EndPointMonitorModel = get_object(
+                    self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False)
+                if db_endpoint_monitor is None:
+                    msg = 'EndPointMonitor({:s}) not found.'.format(str(str_endpoint_monitor_key))
+                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+
+                endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key)
+                str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
+                db_endpoint_monitor_kpi : EndPointMonitorKpiModel = get_object(
+                    self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, raise_if_not_found=False)
+                if db_endpoint_monitor_kpi is None:
+                    msg = 'EndPointMonitorKpi({:s}) not found.'.format(str(str_endpoint_monitor_kpi_key))
+                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+
+                resources_to_unsubscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval
+                resources_to_unsubscribe.append(
+                    (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval))
+
+                driver : _Driver = self.driver_instance_cache.get(device_uuid)
+                if driver is None:
+                    msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid))
+                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+
+                results_unsubscribestate = driver.UnsubscribeState(resources_to_unsubscribe)
+                errors = check_unsubscribe_errors(resources_to_unsubscribe, results_unsubscribestate)
+                if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
+
+                db_endpoint_monitor_kpi.delete()
+                db_kpi.delete()
+
+                # There is one monitoring loop per device; keep them active since they are re-used by different monitoring
+                # requests.
+                #self.monitoring_loops.remove(device_uuid)
+
+            # Subscriptions are not stored as classical driver config.
+            # TODO: consider adding it somehow in the configuration.
+            # Warning: GetConfig might be very slow in OpenConfig devices
+            #running_config_rules = [
+            #    (config_rule[0], json.dumps(config_rule[1], sort_keys=True))
+            #    for config_rule in driver.GetConfig()
+            #]
+            #context_config_rules = {
+            #    config_rule[1]: config_rule[2]
+            #    for config_rule in get_config_rules(self.database, device_uuid, 'running')
+            #}
+
+            ## each in context, not in running => delete in context
+            ## each in running, not in context => add to context
+            ## each in context and in running, context.value != running.value => update in context
+            #running_config_rules_actions : List[Tuple[ORM_ConfigActionEnum, str, str]] = []
+            #for config_rule_key,config_rule_value in running_config_rules:
+            #    running_config_rules_actions.append((ORM_ConfigActionEnum.SET, config_rule_key, config_rule_value))
+            #    context_config_rules.pop(config_rule_key, None)
+            #for context_rule_key,context_rule_value in context_config_rules.items():
+            #    running_config_rules_actions.append((ORM_ConfigActionEnum.DELETE, context_rule_key, context_rule_value))
+
+            ##msg = '[MonitorDeviceKpi] running_config_rules_action[{:d}]: {:s}'
+            ##for i,running_config_rules_action in enumerate(running_config_rules_actions):
+            ##    LOGGER.info(msg.format(i, str(running_config_rules_action)))
+            #update_config(self.database, device_uuid, 'running', running_config_rules_actions)
+
+            sync_device_to_context(db_device, self.context_client)
+            return Empty()
+        finally:
+            self.mutex_queues.signal_done(device_uuid)
diff --git a/src/device/service/__main__.py b/src/device/service/__main__.py
index 1f0adfa8f1dd8b3e307ed202967b1d5195171f11..5c9b41531e7bc579cbe5cc563f20b193f6bc5a90 100644
--- a/src/device/service/__main__.py
+++ b/src/device/service/__main__.py
@@ -34,7 +34,7 @@ def main():
     global LOGGER # pylint: disable=global-statement
 
     log_level = get_log_level()
-    logging.basicConfig(level=log_level)
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
     logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
     logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
     logging.getLogger('monitoring-client').setLevel(logging.WARNING)
diff --git a/src/device/service/database/EndPointModel.py b/src/device/service/database/EndPointModel.py
index 84d0c97073481af162b1e66f7e35c93bc6e1eed5..3d4435737349809c527c80546ed412e621afcbdd 100644
--- a/src/device/service/database/EndPointModel.py
+++ b/src/device/service/database/EndPointModel.py
@@ -34,7 +34,6 @@ class EndPointModel(Model):
     device_fk = ForeignKeyField(DeviceModel)
     endpoint_uuid = StringField(required=True, allow_empty=False)
     endpoint_type = StringField()
-    resource_key = StringField(required=True, allow_empty=False)
 
     def dump_id(self) -> Dict:
         device_id = DeviceModel(self.database, self.device_fk).dump_id()
@@ -74,13 +73,7 @@ def set_endpoint_monitors(database : Database, db_endpoint : EndPointModel, grpc
     for kpi_sample_type in grpc_endpoint_kpi_sample_types:
         orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type)
         str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, str(orm_kpi_sample_type.value)])
-        #db_endpoint_kpi_sample_type = EndPointMonitorModel(database, str_endpoint_kpi_sample_type_key)
-        #db_endpoint_kpi_sample_type.endpoint_fk = db_endpoint
-        #db_endpoint_kpi_sample_type.resource_key = '' # during initialization, allow empty value
-        #db_endpoint_kpi_sample_type.kpi_sample_type = orm_kpi_sample_type
-        #db_endpoint_kpi_sample_type.save()
         update_or_create_object(database, EndPointMonitorModel, str_endpoint_kpi_sample_type_key, {
             'endpoint_fk'    : db_endpoint,
-            #'resource_key'   : '', # during initialization, allow empty value
             'kpi_sample_type': orm_kpi_sample_type,
         })
diff --git a/src/device/service/driver_api/_Driver.py b/src/device/service/driver_api/_Driver.py
index 7dbb9eddb238dcaae9d00b579a1851aacf53225d..371f4cccb4e002e4d232823e47e31f577d1a4285 100644
--- a/src/device/service/driver_api/_Driver.py
+++ b/src/device/service/driver_api/_Driver.py
@@ -15,16 +15,18 @@
 import threading
 from typing import Any, Iterator, List, Optional, Tuple, Union
 
-# Special resource names to request to the driver to retrieve the specified configuration/structural resources.
+# Special resource names to request to the driver to retrieve the specified
+# configuration/structural resources.
 # These resource names should be used with GetConfig() method.
-RESOURCE_ENDPOINTS         = '__endpoints__'
-RESOURCE_INTERFACES        = '__interfaces__'
+RESOURCE_ENDPOINTS = '__endpoints__'
+RESOURCE_INTERFACES = '__interfaces__'
 RESOURCE_NETWORK_INSTANCES = '__network_instances__'
-RESOURCE_ROUTING_POLICIES  = '__routing_policies__'
-RESOURCE_ACL               = '__acl__'
+RESOURCE_ROUTING_POLICIES = '__routing_policies__'
+RESOURCE_ACL = '__acl__'
+
 
 class _Driver:
-    def __init__(self, address : str, port : int, **settings) -> None:
+    def __init__(self, address: str, port: int, **settings) -> None:
         """ Initialize Driver.
             Parameters:
                 address : str
@@ -56,92 +58,122 @@ class _Driver:
         """ Retrieve initial configuration of entire device.
             Returns:
                 values : List[Tuple[str, Any]]
-                    List of tuples (resource key, resource value) for resource keys.
+                    List of tuples (resource key, resource value) for
+                    resource keys.
         """
         raise NotImplementedError()
 
-    def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
-        """ Retrieve running configuration of entire device, or selected resource keys.
+    def GetConfig(self, resource_keys: List[str] = []) -> \
+            List[Tuple[str, Union[Any, None, Exception]]]:
+        """ Retrieve running configuration of entire device or
+        selected resource keys.
             Parameters:
                 resource_keys : List[str]
                     List of keys pointing to the resources to be retrieved.
             Returns:
                 values : List[Tuple[str, Union[Any, None, Exception]]]
-                    List of tuples (resource key, resource value) for resource keys requested. If a resource is found,
-                    the appropriate value type must be retrieved. If a resource is not found, None must be retrieved as
-                    value for that resource. In case of Exception, the Exception must be retrieved as value.
+                    List of tuples (resource key, resource value) for
+                    resource keys requested. If a resource is found,
+                    the appropriate value type must be retrieved.
+                    If a resource is not found, None must be retrieved as
+                    value for that resource. In case of Exception,
+                    the Exception must be retrieved as value.
         """
         raise NotImplementedError()
 
-    def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+    def SetConfig(self, resources: List[Tuple[str, Any]]) -> \
+            List[Union[bool, Exception]]:
         """ Create/Update configuration for a list of resources.
             Parameters:
                 resources : List[Tuple[str, Any]]
-                    List of tuples, each containing a resource_key pointing the resource to be modified, and a
-                    resource_value containing the new value to be set.
+                    List of tuples, each containing a resource_key pointing the
+                    resource to be modified, and a resource_value containing
+                    the new value to be set.
             Returns:
                 results : List[Union[bool, Exception]]
-                    List of results for resource key changes requested. Return values must be in the same order than
-                    resource keys requested. If a resource is properly set, True must be retrieved; otherwise, the
-                    Exception that is raised during the processing must be retrieved.
+                    List of results for resource key changes requested.
+                    Return values must be in the same order as the
+                    resource keys requested. If a resource is properly set,
+                    True must be retrieved; otherwise, the Exception that is
+                    raised during the processing must be retrieved.
         """
         raise NotImplementedError()
 
-    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> \
+            List[Union[bool, Exception]]:
         """ Delete configuration for a list of resources.
             Parameters:
                 resources : List[Tuple[str, Any]]
-                    List of tuples, each containing a resource_key pointing the resource to be modified, and a
-                    resource_value containing possible additionally required values to locate the value to be removed.
+                    List of tuples, each containing a resource_key pointing the
+                    resource to be modified, and a resource_value containing
+                    possible additionally required values to locate
+                    the value to be removed.
             Returns:
-                results : List[bool]
-                    List of results for resource key deletions requested. Return values must be in the same order than
-                    resource keys requested. If a resource is properly deleted, True must be retrieved; otherwise, the
-                    Exception that is raised during the processing must be retrieved.
+                results : List[Union[bool, Exception]]
+                    List of results for resource key deletions requested.
+                    Return values must be in the same order as the resource keys
+                    requested. If a resource is properly deleted, True must be
+                    retrieved; otherwise, the Exception that is raised during
+                    the processing must be retrieved.
         """
         raise NotImplementedError()
 
-    def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
-        """ Subscribe to state information of entire device, or selected resources. Subscriptions are incremental.
+    def SubscribeState(self, subscriptions: List[Tuple[str, float, float]]) -> \
+            List[Union[bool, Exception]]:
+        """ Subscribe to state information of entire device or
+        selected resources. Subscriptions are incremental.
             Driver should keep track of requested resources.
             Parameters:
                 subscriptions : List[Tuple[str, float, float]]
-                    List of tuples, each containing a resource_key pointing the resource to be subscribed, a
-                    sampling_duration, and a sampling_interval (both in seconds with float representation) defining,
-                    respectively, for how long monitoring should last, and the desired monitoring interval for the
-                    resource specified.
+                    List of tuples, each containing a resource_key pointing the
+                    resource to be subscribed, a sampling_duration, and a
+                    sampling_interval (both in seconds with float
+                    representation) defining, respectively, for how long
+                    monitoring should last, and the desired monitoring interval
+                    for the resource specified.
             Returns:
-                results : List[bool]
-                    List of results for resource key subscriptions requested. Return values must be in the same order
-                    than resource keys requested. If a resource is properly subscribed, True must be retrieved;
-                    otherwise, the Exception that is raised during the processing must be retrieved.
+                results : List[Union[bool, Exception]]
+                    List of results for resource key subscriptions requested.
+                    Return values must be in the same order as the resource keys
+                    requested. If a resource is properly subscribed,
+                    True must be retrieved; otherwise, the Exception that is
+                    raised during the processing must be retrieved.
         """
         raise NotImplementedError()
 
-    def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
-        """ Unsubscribe from state information of entire device, or selected resources. Subscriptions are incremental.
+    def UnsubscribeState(self, subscriptions: List[Tuple[str, float, float]]) \
+            -> List[Union[bool, Exception]]:
+        """ Unsubscribe from state information of entire device
+        or selected resources. Subscriptions are incremental.
             Driver should keep track of requested resources.
             Parameters:
                 subscriptions : List[str]
-                    List of tuples, each containing a resource_key pointing the resource to be subscribed, a
-                    sampling_duration, and a sampling_interval (both in seconds with float representation) defining,
-                    respectively, for how long monitoring should last, and the desired monitoring interval for the
-                    resource specified.
+                    List of tuples, each containing a resource_key pointing the
+                    resource to be subscribed, a sampling_duration, and a
+                    sampling_interval (both in seconds with float
+                    representation) defining, respectively, for how long
+                    monitoring should last, and the desired monitoring interval
+                    for the resource specified.
             Returns:
                 results : List[Union[bool, Exception]]
-                    List of results for resource key unsubscriptions requested. Return values must be in the same order
-                    than resource keys requested. If a resource is properly unsubscribed, True must be retrieved;
-                    otherwise, the Exception that is raised during the processing must be retrieved.
+                    List of results for resource key un-subscriptions requested.
+                    Return values must be in the same order as the resource keys
+                    requested. If a resource is properly unsubscribed,
+                    True must be retrieved; otherwise, the Exception that is
+                    raised during the processing must be retrieved.
         """
         raise NotImplementedError()
 
     def GetState(
         self, blocking=False, terminate : Optional[threading.Event] = None
     ) -> Iterator[Tuple[float, str, Any]]:
-        """ Retrieve last collected values for subscribed resources. Operates as a generator, so this method should be
-            called once and will block until values are available. When values are available, it should yield each of
-            them and block again until new values are available. When the driver is destroyed, GetState() can return
-            instead of yield to terminate the loop. Terminate enables to request interruption of the generation.
+        """ Retrieve last collected values for subscribed resources.
+        Operates as a generator, so this method should be called once and will
+        block until values are available. When values are available,
+        it should yield each of them and block again until new values are
+        available. When the driver is destroyed, GetState() can return instead
+        of yield to terminate the loop.
+        Terminate enables to request interruption of the generation.
             Examples:
                 # keep looping waiting for extra samples (generator loop)
                 terminate = threading.Event()
@@ -161,20 +193,27 @@ class _Driver:
                     if i == 10: terminate.set()
             Parameters:
                 blocking : bool
-                    Select the driver behaviour. In both cases, the driver will first retrieve the samples accumulated
-                    and available in the internal queue. Then, if blocking, the driver does not terminate the loop and
-                    waits for additional samples to come, thus behaving as a generator. If non-blocking, the driver
-                    terminates the loop and returns. Non-blocking behaviour can be used for periodically polling the
-                    driver, while blocking can be used when a separate thread is in charge of collecting the samples
-                    produced by the driver.
+                    Select the driver behaviour. In both cases, the driver will
+                    first retrieve the samples accumulated and available in the
+                    internal queue. Then, if blocking, the driver does not
+                    terminate the loop and waits for additional samples to come,
+                    thus behaving as a generator. If non-blocking, the driver
+                    terminates the loop and returns. Non-blocking behaviour can
+                    be used for periodically polling the driver, while blocking
+                    can be used when a separate thread is in charge of
+                    collecting the samples produced by the driver.
                 terminate : threading.Event
-                    Signals the interruption of the GetState method as soon as possible.
+                    Signals the interruption of the GetState method as soon as
+                    possible.
             Returns:
                 results : Iterator[Tuple[float, str, Any]]
-                    Sequences of state sample. Each State sample contains a float Unix-like timestamps of the samples in
-                    seconds with up to microsecond resolution, the resource_key of the sample, and its resource_value.
-                    Only resources with an active subscription must be retrieved. Interval and duration of the sampling
-                    process are specified when creating the subscription using method SubscribeState(). Order of values
-                    yielded is arbitrary.
+                    Sequences of state sample. Each State sample contains a
+                    float Unix-like timestamps of the samples in seconds with up
+                    to microsecond resolution, the resource_key of the sample,
+                    and its resource_value.
+                    Only resources with an active subscription must be
+                    retrieved. Interval and duration of the sampling process are
+                    specified when creating the subscription using method
+                    SubscribeState(). Order of values yielded is arbitrary.
         """
         raise NotImplementedError()
diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py
index dd41096ec25fb74f1b1b855c98f90e09fee33194..9342e650b9fadb21fa1b65fb951a08ae6f066a3c 100644
--- a/src/device/service/drivers/openconfig/OpenConfigDriver.py
+++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py
@@ -61,11 +61,13 @@ class NetconfSessionHandler:
         self.__port = int(port)
         self.__username       = settings.get('username')
         self.__password       = settings.get('password')
+        self.__vendor         = settings.get('vendor')
         self.__key_filename   = settings.get('key_filename')
         self.__hostkey_verify = settings.get('hostkey_verify', True)
         self.__look_for_keys  = settings.get('look_for_keys', True)
         self.__allow_agent    = settings.get('allow_agent', True)
         self.__force_running  = settings.get('force_running', False)
+        self.__commit_per_delete  = settings.get('delete_rule', False)
         self.__device_params  = settings.get('device_params', {})
         self.__manager_params = settings.get('manager_params', {})
         self.__nc_params      = settings.get('nc_params', {})
@@ -90,6 +92,12 @@ class NetconfSessionHandler:
     @property
     def use_candidate(self): return self.__candidate_supported and not self.__force_running
 
+    @property
+    def commit_per_rule(self): return self.__commit_per_delete 
+
+    @property
+    def vendor(self): return self.__vendor
+
     @RETRY_DECORATOR
     def get(self, filter=None, with_defaults=None): # pylint: disable=redefined-builtin
         with self.__lock:
@@ -181,8 +189,9 @@ def do_sampling(samples_cache : SamplesCache, resource_key : str, out_samples :
         LOGGER.exception('Error retrieving samples')
 
 def edit_config(
-    netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, target='running',
-    default_operation='merge', test_option=None, error_option=None, format='xml' # pylint: disable=redefined-builtin
+    netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, commit_per_rule= False,
+    target='running', default_operation='merge', test_option=None, error_option=None,
+    format='xml' # pylint: disable=redefined-builtin
 ):
     str_method = 'DeleteConfig' if delete else 'SetConfig'
     LOGGER.info('[{:s}] resources = {:s}'.format(str_method, str(resources)))
@@ -195,13 +204,16 @@ def edit_config(
             chk_length(str_resource_name, resource, min_length=2, max_length=2)
             resource_key,resource_value = resource
             chk_string(str_resource_name + '.key', resource_key, allow_empty=False)
-            str_config_message = compose_config(resource_key, resource_value, delete=delete)
+            str_config_message = compose_config(
+                resource_key, resource_value, delete=delete, vendor=netconf_handler.vendor)
             if str_config_message is None: raise UnsupportedResourceKeyException(resource_key)
             LOGGER.info('[{:s}] str_config_message[{:d}] = {:s}'.format(
                 str_method, len(str_config_message), str(str_config_message)))
             netconf_handler.edit_config(
                 config=str_config_message, target=target, default_operation=default_operation,
                 test_option=test_option, error_option=error_option, format=format)
+            if commit_per_rule:
+                netconf_handler.commit()
             results[i] = True
         except Exception as e: # pylint: disable=broad-except
             str_operation = 'preparing' if target == 'candidate' else ('deleting' if delete else 'setting')
@@ -278,12 +290,15 @@ class OpenConfigDriver(_Driver):
         with self.__lock:
             if self.__netconf_handler.use_candidate:
                 with self.__netconf_handler.locked(target='candidate'):
-                    results = edit_config(self.__netconf_handler, resources, target='candidate')
-                    try:
-                        self.__netconf_handler.commit()
-                    except Exception as e: # pylint: disable=broad-except
-                        LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources)))
-                        results = [e for _ in resources] # if commit fails, set exception in each resource
+                    if self.__netconf_handler.commit_per_rule:
+                           results = edit_config(self.__netconf_handler, resources, target='candidate', commit_per_rule= True)
+                    else:
+                        results = edit_config(self.__netconf_handler, resources, target='candidate')
+                        try:
+                            self.__netconf_handler.commit()
+                        except Exception as e: # pylint: disable=broad-except
+                            LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources)))
+                            results = [e for _ in resources] # if commit fails, set exception in each resource
             else:
                 results = edit_config(self.__netconf_handler, resources)
         return results
@@ -294,12 +309,15 @@ class OpenConfigDriver(_Driver):
         with self.__lock:
             if self.__netconf_handler.use_candidate:
                 with self.__netconf_handler.locked(target='candidate'):
-                    results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True)
-                    try:
-                        self.__netconf_handler.commit()
-                    except Exception as e: # pylint: disable=broad-except
-                        LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources)))
-                        results = [e for _ in resources] # if commit fails, set exception in each resource
+                    if self.__netconf_handler.commit_per_rule:
+                           results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True, commit_per_rule= True)
+                    else:
+                        results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True)
+                        try:
+                            self.__netconf_handler.commit()
+                        except Exception as e: # pylint: disable=broad-except
+                            LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources)))
+                            results = [e for _ in resources] # if commit fails, set exception in each resource
             else:
                 results = edit_config(self.__netconf_handler, resources, delete=True)
         return results
diff --git a/src/device/service/drivers/openconfig/templates/EndPoints.py b/src/device/service/drivers/openconfig/templates/EndPoints.py
index c11b1669d5b4cf3ca47986817ded28f75ae8358f..718a02d193531924bef863f5ccd2cbb999388dbd 100644
--- a/src/device/service/drivers/openconfig/templates/EndPoints.py
+++ b/src/device/service/drivers/openconfig/templates/EndPoints.py
@@ -20,7 +20,7 @@ from .Tools import add_value_from_collection, add_value_from_tag
 
 LOGGER = logging.getLogger(__name__)
 
-XPATH_PORTS = "//ocp:components/ocp:component/ocp:state[ocp:type='PORT']/.."
+XPATH_PORTS = "//ocp:components/ocp:component"
 XPATH_IFACE_COUNTER = "//oci:interfaces/oci:interface[oci:name='{:s}']/state/counters/{:s}"
 
 def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
@@ -28,6 +28,13 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
     for xml_component in xml_data.xpath(XPATH_PORTS, namespaces=NAMESPACES):
         #LOGGER.info('xml_component = {:s}'.format(str(ET.tostring(xml_component))))
 
+        component_type = xml_component.find('ocp:state/ocp:type', namespaces=NAMESPACES)
+        if component_type is None or component_type.text is None: continue
+        component_type = component_type.text
+        if component_type not in {'PORT', 'oc-platform-types:PORT'}: continue
+
+        LOGGER.info('PORT xml_component = {:s}'.format(str(ET.tostring(xml_component))))
+
         endpoint = {}
 
         component_name = xml_component.find('ocp:name', namespaces=NAMESPACES)
diff --git a/src/device/service/drivers/openconfig/templates/Interfaces.py b/src/device/service/drivers/openconfig/templates/Interfaces.py
index 33f977524c6f65655fbe17f6d2d95a7cfc223967..3f5b104f2de01137c2424e776dc60b8416088de6 100644
--- a/src/device/service/drivers/openconfig/templates/Interfaces.py
+++ b/src/device/service/drivers/openconfig/templates/Interfaces.py
@@ -37,6 +37,10 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
         #interface_type = xml_interface.find('oci:config/oci:type', namespaces=NAMESPACES)
         #add_value_from_tag(interface, 'type', interface_type)
 
+        interface_type = xml_interface.find('oci:config/oci:type', namespaces=NAMESPACES)
+        interface_type.text = interface_type.text.replace('ianaift:','')
+        add_value_from_tag(interface, 'type', interface_type)
+
         interface_mtu = xml_interface.find('oci:config/oci:mtu', namespaces=NAMESPACES)
         add_value_from_tag(interface, 'mtu', interface_mtu, cast=int)
 
@@ -49,12 +53,15 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
             subinterface = {}
 
             add_value_from_tag(subinterface, 'name', interface_name)
+            add_value_from_tag(subinterface, 'mtu', interface_mtu)
+            add_value_from_tag(subinterface, 'type', interface_type)
+
 
             subinterface_index = xml_subinterface.find('oci:index', namespaces=NAMESPACES)
             if subinterface_index is None or subinterface_index.text is None: continue
             add_value_from_tag(subinterface, 'index', subinterface_index, cast=int)
 
-            vlan_id = xml_subinterface.find('ocv:vlan/ocv:config/ocv:vlan-id', namespaces=NAMESPACES)
+            vlan_id = xml_subinterface.find('ocv:vlan/ocv:match/ocv:single-tagged/ocv:config/ocv:vlan-id', namespaces=NAMESPACES)
             add_value_from_tag(subinterface, 'vlan_id', vlan_id, cast=int)
 
             # TODO: implement support for multiple IP addresses per subinterface
diff --git a/src/device/service/drivers/openconfig/templates/NetworkInstances.py b/src/device/service/drivers/openconfig/templates/NetworkInstances.py
index b091a0d206195a6c2ce94008628071cd9e30944f..8399402fa76b8b6b00829493cc8ebd28fd6018f4 100644
--- a/src/device/service/drivers/openconfig/templates/NetworkInstances.py
+++ b/src/device/service/drivers/openconfig/templates/NetworkInstances.py
@@ -27,6 +27,9 @@ XPATH_NI_IIP_AP         = ".//ocni:inter-instance-policies/ocni:apply-policy"
 XPATH_NI_IIP_AP_IMPORT  = ".//ocni:config/ocni:import-policy"
 XPATH_NI_IIP_AP_EXPORT  = ".//ocni:config/ocni:export-policy"
 
+XPATH_NI_CPOINTS          = ".//ocni:connection-points/ocni:connection-point"
+XPATH_NI_CPOINTS_ENDPOINT = ".//ocni:endpoints/ocni:endpoint/ocni:remote/ocni:config"
+
 def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
     response = []
     for xml_network_instance in xml_data.xpath(XPATH_NETWORK_INSTANCES, namespaces=NAMESPACES):
@@ -39,10 +42,11 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
         add_value_from_tag(network_instance, 'name', ni_name)
 
         ni_type = xml_network_instance.find('ocni:config/ocni:type', namespaces=NAMESPACES)
+        ni_type.text = ni_type.text.replace('oc-ni-types:','')
         add_value_from_tag(network_instance, 'type', ni_type)
 
-        #ni_router_id = xml_network_instance.find('ocni:config/ocni:router-id', namespaces=NAMESPACES)
-        #add_value_from_tag(network_instance, 'router_id', ni_router_id)
+        ni_router_id = xml_network_instance.find('ocni:config/ocni:router-id', namespaces=NAMESPACES)
+        add_value_from_tag(network_instance, 'router_id', ni_router_id)
 
         ni_route_dist = xml_network_instance.find('ocni:config/ocni:route-distinguisher', namespaces=NAMESPACES)
         add_value_from_tag(network_instance, 'route_distinguisher', ni_route_dist)
@@ -53,6 +57,20 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
         if len(network_instance) == 0: continue
         response.append(('/network_instance[{:s}]'.format(network_instance['name']), network_instance))
 
+        for xml_cpoints in xml_network_instance.xpath(XPATH_NI_PROTOCOLS, namespaces=NAMESPACES):
+            cpoint = {}
+            add_value_from_tag(cpoint, 'name', ni_name)
+
+            connection_point = xml_cpoints.find('ocni:connection-point-id', namespaces=NAMESPACES)
+            add_value_from_tag(cpoint, 'connection_point', connection_point)
+
+            for xml_endpoint in xml_cpoints.xpath(XPATH_NI_CPOINTS_ENDPOINT, namespaces=NAMESPACES):
+                remote_system = xml_endpoint.find('ocni:remote-system', namespaces=NAMESPACES)
+                add_value_from_tag(cpoint, 'remote_system', remote_system)
+
+                VC_ID = xml_endpoint.find('ocni:virtual-circuit-identifier', namespaces=NAMESPACES)
+                add_value_from_tag(cpoint, 'VC_ID', VC_ID)
+
         for xml_protocol in xml_network_instance.xpath(XPATH_NI_PROTOCOLS, namespaces=NAMESPACES):
             #LOGGER.info('xml_protocol = {:s}'.format(str(ET.tostring(xml_protocol))))
 
@@ -71,6 +89,8 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
             if protocol['identifier'] == 'BGP':
                 bgp_as = xml_protocol.find('ocni:bgp/ocni:global/ocni:config/ocni:as', namespaces=NAMESPACES)
                 add_value_from_tag(protocol, 'as', bgp_as, cast=int)
+                bgp_id = xml_protocol.find('ocni:bgp/ocni:global/ocni:config/ocni:router-id', namespaces=NAMESPACES)
+                add_value_from_tag(protocol, 'router_id', bgp_id)
 
             resource_key = '/network_instance[{:s}]/protocols[{:s}]'.format(
                 network_instance['name'], protocol['identifier'])
@@ -94,7 +114,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
             add_value_from_tag(table_connection, 'address_family', address_family,
                                cast=lambda s: s.replace('oc-types:', ''))
 
-            default_import_policy = xml_table_connection.find('ocni:default-import-policy', namespaces=NAMESPACES)
+            default_import_policy = xml_table_connection.find('ocni:config/ocni:default-import-policy', namespaces=NAMESPACES)
             add_value_from_tag(table_connection, 'default_import_policy', default_import_policy)
 
             resource_key = '/network_instance[{:s}]/table_connections[{:s}][{:s}][{:s}]'.format(
@@ -125,4 +145,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
                     iip_ap['name'], iip_ap['export_policy'])
                 response.append((resource_key, iip_ap))
 
+
+
+
     return response
diff --git a/src/device/service/drivers/openconfig/templates/RoutingPolicy.py b/src/device/service/drivers/openconfig/templates/RoutingPolicy.py
index 369732de3fe58c52a2e9ab2227899160d091ff68..068ca5430d9135e784dbe9a07f80d81472cbf5cc 100644
--- a/src/device/service/drivers/openconfig/templates/RoutingPolicy.py
+++ b/src/device/service/drivers/openconfig/templates/RoutingPolicy.py
@@ -74,7 +74,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
         resource_key = '/routing_policy/bgp_defined_set[{:s}]'.format(bgp_ext_community_set['ext_community_set_name'])
         response.append((resource_key, copy.deepcopy(bgp_ext_community_set)))
 
-        ext_community_member = xml_bgp_ext_community_set.find('ocbp:ext-community-member', namespaces=NAMESPACES)
+        ext_community_member = xml_bgp_ext_community_set.find('ocbp:config/ocbp:ext-community-member', namespaces=NAMESPACES)
         if ext_community_member is not None and ext_community_member.text is not None:
             add_value_from_tag(bgp_ext_community_set, 'ext_community_member', ext_community_member)
 
diff --git a/src/device/service/drivers/openconfig/templates/__init__.py b/src/device/service/drivers/openconfig/templates/__init__.py
index 901f5cf0291dca1bda155e20abd16db5989df7dc..5e77b25fe3206407db9427085de70b95342d370a 100644
--- a/src/device/service/drivers/openconfig/templates/__init__.py
+++ b/src/device/service/drivers/openconfig/templates/__init__.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import json, logging, lxml.etree as ET, re
-from typing import Any, Dict
+from typing import Any, Dict, Optional
 from jinja2 import Environment, PackageLoader, select_autoescape
 from device.service.driver_api._Driver import (
     RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_ACL)
@@ -77,9 +77,11 @@ def parse(resource_key : str, xml_data : ET.Element):
     if parser is None: return [(resource_key, xml_data)]
     return parser(xml_data)
 
-def compose_config(resource_key : str, resource_value : str, delete : bool = False) -> str:
+def compose_config(
+    resource_key : str, resource_value : str, delete : bool = False, vendor : Optional[str] = None
+) -> str:
     template_name = '{:s}/edit_config.xml'.format(RE_REMOVE_FILTERS.sub('', resource_key))
     template = JINJA_ENV.get_template(template_name)
     data : Dict[str, Any] = json.loads(resource_value)
     operation = 'delete' if delete else 'merge'
-    return '<config>{:s}</config>'.format(template.render(**data, operation=operation).strip())
+    return '<config>{:s}</config>'.format(template.render(**data, operation=operation, vendor=vendor).strip())
diff --git a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml
index fac259b6fdcd3cbded93088ddc6335ea2bfe5f69..2769e8b2e9f81326332ae175f915432b7337f24c 100644
--- a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml
@@ -13,6 +13,16 @@
           <config>
             <sequence-id>{{sequence_id}}</sequence-id>
           </config>
+          {% if operation is not defined or operation != 'delete' %}
+          {% if type=='ACL_L2' %}
+          <l2>
+            <config>
+              {% if source_address is defined %}<source-mac>{{source_address}}</source-mac>{% endif%}
+              {% if destination_address is defined %}<destination-mac>{{destination_address}}</destination-mac>{% endif%}
+            </config>
+          </l2>
+          {% endif%}
+          {% if type=='ACL_IPV4' %}
           <ipv4>
             <config>
               {% if source_address is defined %}<source-address>{{source_address}}</source-address>{% endif%}
@@ -29,12 +39,26 @@
               {% if tcp_flags is defined %}<tcp-flags>{{tcp_flags}}</tcp-flags>{% endif%}
             </config>
           </transport>
+         {% endif%}
+         {% if type=='ACL_IPV6' %}
+          <ipv6>
+            <config>
+              {% if source_address is defined %}<source-address>{{source_address}}</source-address>{% endif%}
+              {% if destination_address is defined %}<destination-address>{{destination_address}}</destination-address>{% endif%}
+              {% if protocol is defined %}<protocol>{{protocol}}</protocol>{% endif%}
+              {% if dscp is defined %}<dscp>{{dscp}}</dscp>{% endif%}
+              {% if hop_limit is defined %}<hop-limit>{{hop_limit}}</hop-limit>{% endif%}
+            </config>
+          </ipv6>
+         {% endif%}
+          
           <actions>
             <config>
               {% if forwarding_action is defined %}<forwarding-action>{{forwarding_action}}</forwarding-action>{% endif%}
               {% if log_action is defined %}<log-action>{{log_action}}</log-action>{% endif%}
             </config>
           </actions>
+          {% endif%}
         </acl-entry>
       </acl-entries>
     </acl-set>
diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml
index d987b0cc4b40298533f140f71af83c6fad884020..b070b305a505890c51f3751d2b83eb415ae4aa43 100644
--- a/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml
@@ -1,18 +1,21 @@
 <acl xmlns="http://openconfig.net/yang/acl">
   <interfaces>
-    <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
+    <interface {% if operation is defined %}{% if all is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %} {% endif %}>
       <id>{{id}}</id>
       <config>
         <id>{{id}}</id>
       </config>
+      {% if interface is defined %}
       <interface-ref>
         <config>
           <interface>{{interface}}</interface>
           {% if subinterface is defined %}<subinterface>{{subinterface}}</subinterface>{% endif%}
         </config>
       </interface-ref>
+      {% endif%}
+      {% if set_name_egress is defined %}
       <egress-acl-sets>
-        <egress-acl-set>
+        <egress-acl-set {% if operation is defined %}{% if egress is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %} {% endif %}>>
           <set-name>{{set_name_egress}}</set-name>
           <type>{{type_egress}}</type>
           <config>
@@ -21,6 +24,7 @@
           </config>
         </egress-acl-set>
       </egress-acl-sets>
+      {% endif%}
     </interface>
   </interfaces>
 </acl>
diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml
index 144a03c55477e532379541be5443063fe3aa2f10..d1f18efb26bc1316354c2bb26623cb36f7dc0be6 100644
--- a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml
@@ -1,18 +1,21 @@
 <acl xmlns="http://openconfig.net/yang/acl">
   <interfaces>
-    <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
+    <interface {% if operation is defined %}{% if all is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %} {% endif %}>
       <id>{{id}}</id>
       <config>
         <id>{{id}}</id>
       </config>
+      {% if interface is defined %}
       <interface-ref>
         <config>
           <interface>{{interface}}</interface>
           {% if subinterface is defined %}<subinterface>{{subinterface}}</subinterface>{% endif%}
         </config>
       </interface-ref>
+      {% endif%}
+      {% if set_name_ingress is defined %}
       <ingress-acl-sets>
-        <ingress-acl-set>
+        <ingress-acl-set {% if operation is defined %}{% if ingress is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %} {% endif %}>
           <set-name>{{set_name_ingress}}</set-name>
           <type>{{type_ingress}}</type>
           <config>
@@ -21,6 +24,7 @@
           </config>
         </ingress-acl-set>
       </ingress-acl-sets>
+      {% endif%}
     </interface>
   </interfaces>
 </acl>
diff --git a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml
index ff15d1d682ea910208237c32adcc93029fb036d8..4bc53ff1ddfbebbdcef2a0b4c37770210726676b 100644
--- a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml
@@ -1,14 +1,12 @@
 <interfaces xmlns="http://openconfig.net/yang/interfaces">
-    <interface{% if operation is defined and operation != 'delete' %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
+    <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
         <name>{{name}}</name>
+        {% if operation is defined and operation != 'delete' %}
         <config>
             <name>{{name}}</name>
-            {% if operation is defined and operation == 'delete' %}
             <description></description>
-            {% else %}
-            <description>{{description}}</description>
             <mtu>{{mtu}}</mtu>
-            {% endif %}
         </config>
+       {% endif %}
     </interface>
 </interfaces>
diff --git a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml
index d266f819c41355ba8a30086415f2bba3b68f1f3d..1bdb8efbff495f04ee90dadaffaa7412332531b7 100644
--- a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml
@@ -1,35 +1,46 @@
-<interfaces xmlns="http://openconfig.net/yang/interfaces">
+<interfaces xmlns="http://openconfig.net/yang/interfaces" 
+            xmlns:oc-ip="http://openconfig.net/yang/interfaces/ip" >
     <interface>
         <name>{{name}}</name>
-        {% if operation is not defined or operation != 'delete' %}
         <config>
             <name>{{name}}</name>
+            <type xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type">ianaift:{{type}}</type>
+            {% if mtu is defined %}<mtu>{{mtu}}</mtu>{% endif%}
+            <enabled>true</enabled>
         </config>
-        {% endif %}
         <subinterfaces>
-            <subinterface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
+            <subinterface>
                 <index>{{index}}</index>
-                {% if operation is not defined or operation != 'delete' %}
                 <config>
                     <index>{{index}}</index>
-                    <enabled>true</enabled>
+                    <description>{{description}}</description>
+                    {% if vendor=="ADVA" and vlan_id is not defined %}
+                    <untagged-allowed xmlns="http://www.advaoptical.com/cim/adva-dnos-oc-interfaces">true</untagged-allowed>
+                    {% endif%}
                 </config>
+                {% if vlan_id is defined %}
                 <vlan xmlns="http://openconfig.net/yang/vlan">
-                    <config>
-                        <vlan-id>{{vlan_id}}</vlan-id>
-                    </config>
-                </vlan>
-                <ipv4 xmlns="http://openconfig.net/yang/interfaces/ip">
-                    <addresses>
-                        <address>
-                            <ip>{{address_ip}}</ip>
+                    <match>
+                        <single-tagged>
                             <config>
-                                <ip>{{address_ip}}</ip>
-                                <prefix-length>{{address_prefix}}</prefix-length>
+                                <vlan-id>{{vlan_id}}</vlan-id>
                             </config>
-                        </address>
-                    </addresses>
-                </ipv4>
+                        </single-tagged>
+                    </match>
+                </vlan>
+                {% endif %}
+                {% if address_ip is defined %}
+                <oc-ip:ipv4>
+                    <oc-ip:addresses>
+                        <oc-ip:address>
+                            <oc-ip:ip>{{address_ip}}</oc-ip:ip>
+                            <oc-ip:config>
+                                <oc-ip:ip>{{address_ip}}</oc-ip:ip>
+                                <oc-ip:prefix-length>{{address_prefix}}</oc-ip:prefix-length>
+                            </oc-ip:config>
+                        </oc-ip:address>
+                    </oc-ip:addresses>
+                </oc-ip:ipv4>
                 {% endif %}
             </subinterface>
         </subinterfaces>
diff --git a/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml
new file mode 100644
index 0000000000000000000000000000000000000000..60272e5fba4dd87c9bc48ef596197c2508b75e59
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml
@@ -0,0 +1,29 @@
+<network-instances xmlns="http://openconfig.net/yang/network-instance">
+    <network-instance>
+        <name>{{name}}</name>
+        <connection-points>
+            <connection-point>
+                <connection-point-id>{{connection_point}}</connection-point-id>
+                <config>
+                    <connection-point-id>{{connection_point}}</connection-point-id>
+                </config>
+                <endpoints>
+                    <endpoint>
+                        <endpoint-id>{{connection_point}}</endpoint-id>
+                        <config>
+                            <endpoint-id>{{connection_point}}</endpoint-id>
+                            <precedence>1</precedence>
+                            <type xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types">oc-ni-types:REMOTE</type>
+                        </config>
+                        <remote>
+                            <config>
+                                <virtual-circuit-identifier>{{VC_ID}}</virtual-circuit-identifier>
+                                <remote-system>{{remote_system}}</remote-system>
+                            </config>
+                        </remote>
+                    </endpoint>
+                </endpoints>
+            </connection-point>
+        </connection-points>
+    </network-instance>
+</network-instances>
diff --git a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml
index 9362c09c6cfebcd1f83b05002f58eda51724b911..17b07df7233e94f16923c5da49eef2b8b5ccda82 100644
--- a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml
@@ -5,7 +5,8 @@
         <config>
             <name>{{name}}</name>
             <type xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types">oc-ni-types:{{type}}</type>
-            <description>{{description}}</description>
+            {% if type=='L3VRF' %}
+            {% if description is defined %}<description>{{description}}</description>{% endif %}
             {% if router_id is defined %}<router-id>{{router_id}}</router-id>{% endif %}
             <route-distinguisher>{{route_distinguisher}}</route-distinguisher>
             <enabled>true</enabled>
@@ -13,8 +14,29 @@
         <encapsulation>
             <config>
                 <encapsulation-type xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types">oc-ni-types:MPLS</encapsulation-type>
+                <label-allocation-mode xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types">oc-ni-types:INSTANCE_LABEL</label-allocation-mode>
             </config>
         </encapsulation>
+            {% endif %}
+            {% if type=='L2VSI' %}
+            {% if description is defined %}<description>{{description}}</description>{% endif %}
+            <enabled>true</enabled>
+            <mtu>1500</mtu>
+        </config>
+        <encapsulation>
+            <config>
+                <encapsulation-type xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types">oc-ni-types:MPLS</encapsulation-type>
+            </config>
+        </encapsulation>
+        <fdb>
+            <config>
+                <mac-learning>true</mac-learning>
+                <maximum-entries>1000</maximum-entries>
+                <mac-aging-time>300</mac-aging-time>
+            </config>
+        </fdb>
+            {% endif %}
+
         {% endif %}
     </network-instance>
 </network-instances>
diff --git a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml
index d5c33d31a6d671216db55c0eded94dc15a56bec8..bf8c0c0770f9344fbed16f3a6b09f7fa99a978ef 100644
--- a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml
@@ -2,15 +2,13 @@
     <network-instance>
         <name>{{name}}</name>
         <interfaces>
-            <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
+            <interface>
                 <id>{{id}}</id>
-                {% if operation is not defined or operation != 'delete' %}
                 <config>
                     <id>{{id}}</id>
                     <interface>{{interface}}</interface>
                     <subinterface>{{subinterface}}</subinterface>
                 </config>
-                {% endif %}
             </interface>
         </interfaces>
     </network-instance>
diff --git a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml
index da05d0467605e6cec0c3448cc325ff60dfc7cfc9..c9c068e480c0569cfe5f97b78b28fbe03e2595f8 100644
--- a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml
@@ -3,19 +3,19 @@
         <name>{{name}}</name>
         <protocols>
             <protocol{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
-                <identifier>{{identifier}}</identifier>
+                <identifier xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{identifier}}</identifier>
                 <name>{{protocol_name}}</name>
                 {% if operation is not defined or operation != 'delete' %}
                 <config>
-                    <identifier>{{identifier}}</identifier>
+                    <identifier xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{identifier}}</identifier>
                     <name>{{protocol_name}}</name>
-                    <enabled>true</enabled>
                 </config>
                 {% if identifier=='BGP' %}
                 <bgp>
                     <global>
                         <config>
                             <as>{{as}}</as>
+                            <router-id>{{router_id}}</router-id>
                         </config>
                     </global>
                 </bgp>
@@ -23,5 +23,18 @@
                 {% endif %}
             </protocol>
         </protocols>
+        {% if operation is not defined or operation != 'delete' %}
+
+        <tables>
+            <table>
+                <protocol xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{identifier}}</protocol>
+                <address-family xmlns:oc-types="http://openconfig.net/yang/openconfig-types">oc-types:IPV4</address-family>
+                <config>
+                    <protocol xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{identifier}}</protocol>
+                    <address-family xmlns:oc-types="http://openconfig.net/yang/openconfig-types">oc-types:IPV4</address-family>
+                </config>
+            </table>
+        </tables>
+        {% endif %}
     </network-instance>
 </network-instances>
diff --git a/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml b/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml
index df64606ae5ab434e5e3453f7294db02bb749bdce..6843c2dcbd306b149a4168565447d11174eceadc 100644
--- a/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml
@@ -5,7 +5,10 @@
                 <ext-community-set{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
                     <ext-community-set-name>{{ext_community_set_name}}</ext-community-set-name>
                     {% if operation is not defined or operation != 'delete' %}
-                    {% if ext_community_member is defined %} <ext-community-member>{{ext_community_member}}</ext-community-member>{% endif %}
+                        <config>
+                            <ext-community-set-name>{{ext_community_set_name}}</ext-community-set-name>
+                            <ext-community-member>{{ext_community_member}}</ext-community-member>
+                        </config>
                     {% endif %}
                 </ext-community-set>
             </ext-community-sets>
diff --git a/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml b/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml
index 711067f424b68da0e69913ce01f5133c5cbbfe02..eda2d99c9f6299f7345767db8bed8e8cc58284ae 100644
--- a/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml
@@ -1,8 +1,11 @@
-{% if operation is not defined or operation != 'delete' %}
 <routing-policy xmlns="http://openconfig.net/yang/routing-policy">
     <policy-definitions>
-        <policy-definition>
+        <policy-definition {% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
             <name>{{policy_name}}</name>
+            {% if operation is not defined or operation != 'delete' %}
+            <config>
+                <name>{{policy_name}}</name>
+            </config>
             <statements>
                 <statement>
                     <name>{{statement_name}}</name>
@@ -10,11 +13,13 @@
                         <name>{{statement_name}}</name>
                     </config>
                     <conditions>
+                         <config>
+                            <install-protocol-eq xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:DIRECTLY_CONNECTED</install-protocol-eq>
+                        </config>
                         <bgp-conditions xmlns="http://openconfig.net/yang/bgp-policy">
-                            <match-ext-community-set>
+                             <config>
                                 <ext-community-set>{{ext_community_set_name}}</ext-community-set>
-                                <match-set-options>{{match_set_options}}</match-set-options>
-                            </match-ext-community-set>
+                            </config>
                         </bgp-conditions>
                     </conditions>
                     <actions>
@@ -24,7 +29,7 @@
                     </actions>
                 </statement>
             </statements>
+            {% endif %}
         </policy-definition>
     </policy-definitions>
 </routing-policy>
-{% endif %}
diff --git a/src/device/service/drivers/p4/__init__.py b/src/device/service/drivers/p4/__init__.py
index 70a33251242c51f49140e596b8208a19dd5245f7..9953c820575d42fa88351cc8de022d880ba96e6a 100644
--- a/src/device/service/drivers/p4/__init__.py
+++ b/src/device/service/drivers/p4/__init__.py
@@ -11,4 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
diff --git a/src/device/service/drivers/p4/p4_client.py b/src/device/service/drivers/p4/p4_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..600d08880c7e8a1d6a7238e60d66a87d7167bd8c
--- /dev/null
+++ b/src/device/service/drivers/p4/p4_client.py
@@ -0,0 +1,607 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+P4Runtime client.
+"""
+
+import logging
+import queue
+import sys
+import enum
+import threading
+from functools import wraps
+from typing import NamedTuple
+import grpc
+import google.protobuf.text_format
+from google.rpc import status_pb2, code_pb2
+
+from p4.v1 import p4runtime_pb2
+from p4.v1 import p4runtime_pb2_grpc
+
+STREAM_ATTR_ARBITRATION = "arbitration"
+STREAM_ATTR_PACKET = "packet"
+STREAM_ATTR_DIGEST = "digest"
+STREAM_ATTR_IDLE_NOT = "idle_timeout_notification"
+STREAM_ATTR_UNKNOWN = "unknown"
+
+LOGGER = logging.getLogger(__name__)
+
+
+class P4RuntimeErrorFormatException(Exception):
+    """
+    P4Runtime error format exception.
+    """
+
+
+# Used to iterate over the p4.Error messages in a gRPC error Status object
+class P4RuntimeErrorIterator:
+    """
+    P4Runtime error iterator.
+
+    Attributes
+    ----------
+    grpc_error : object
+        gRPC error
+    """
+
+    def __init__(self, grpc_error):
+        assert grpc_error.code() == grpc.StatusCode.UNKNOWN
+        self.grpc_error = grpc_error
+
+        error = None
+        # The gRPC Python package does not have a convenient way to access the
+        # binary details for the error: they are treated as trailing metadata.
+        for meta in self.grpc_error.trailing_metadata():
+            if meta[0] == "grpc-status-details-bin":
+                error = status_pb2.Status()
+                error.ParseFromString(meta[1])
+                break
+        if error is None:
+            raise P4RuntimeErrorFormatException("No binary details field")
+
+        if len(error.details) == 0:
+            raise P4RuntimeErrorFormatException(
+                "Binary details field has empty Any details repeated field")
+        self.errors = error.details
+        self.idx = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        while self.idx < len(self.errors):
+            p4_error = p4runtime_pb2.Error()
+            one_error_any = self.errors[self.idx]
+            if not one_error_any.Unpack(p4_error):
+                raise P4RuntimeErrorFormatException(
+                    "Cannot convert Any message to p4.Error")
+            if p4_error.canonical_code == code_pb2.OK:
+                continue
+            val = self.idx, p4_error
+            self.idx += 1
+            return val
+        raise StopIteration
+
+
+class P4RuntimeWriteException(Exception):
+    """
+    P4Runtime write exception handler.
+
+    Attributes
+    ----------
+    grpc_error : object
+        gRPC error
+    """
+
+    def __init__(self, grpc_error):
+        assert grpc_error.code() == grpc.StatusCode.UNKNOWN
+        super().__init__()
+        self.errors = []
+        try:
+            error_iterator = P4RuntimeErrorIterator(grpc_error)
+            for error_tuple in error_iterator:
+                self.errors.append(error_tuple)
+        except P4RuntimeErrorFormatException as ex:
+            raise P4RuntimeException(grpc_error) from ex
+
+    def __str__(self):
+        message = "Error(s) during Write:\n"
+        for idx, p4_error in self.errors:
+            code_name = code_pb2._CODE.values_by_number[
+                p4_error.canonical_code].name
+            message += f"\t* At index {idx}: {code_name}, " \
+                       f"'{p4_error.message}'\n"
+        return message
+
+
+class P4RuntimeException(Exception):
+    """
+    P4Runtime exception handler.
+
+    Attributes
+    ----------
+    grpc_error : object
+        gRPC error
+    """
+
+    def __init__(self, grpc_error):
+        super().__init__()
+        self.grpc_error = grpc_error
+
+    def __str__(self):
+        message = f"P4Runtime RPC error ({self.grpc_error.code().name}): " \
+                  f"{self.grpc_error.details()}"
+        return message
+
+
+def parse_p4runtime_write_error(func):
+    """
+    Parse P4Runtime write error.
+
+    :param func: function
+    :return: parsed error
+    """
+
+    @wraps(func)
+    def handle(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except grpc.RpcError as ex:
+            if ex.code() != grpc.StatusCode.UNKNOWN:
+                raise ex
+            raise P4RuntimeWriteException(ex) from None
+
+    return handle
+
+
+def parse_p4runtime_error(func):
+    """
+    Parse P4Runtime error.
+
+    :param func: function
+    :return: parsed error
+    """
+
+    @wraps(func)
+    def handle(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except grpc.RpcError as ex:
+            raise P4RuntimeException(ex) from None
+
+    return handle
+
+
+class SSLOptions(NamedTuple):
+    """
+    Tuple of SSL options.
+    """
+    insecure: bool
+    cacert: str = None
+    cert: str = None
+    key: str = None
+
+
+def read_pem_file(path):
+    """
+    Load and read PEM file.
+
+    :param path: path to PEM file
+    :return: file descriptor
+    """
+    try:
+        with open(path, "rb") as f_d:
+            return f_d.read()
+    except (FileNotFoundError, IOError, OSError):
+        logging.critical("Cannot read from PEM file '%s'", path)
+        sys.exit(1)
+
+
+@enum.unique
+class WriteOperation(enum.Enum):
+    """
+    Write Operations.
+    """
+    insert = 1
+    update = 2
+    delete = 3
+
+
+def select_operation(mode):
+    """
+    Select P4 operation based upon the operation mode.
+
+    :param mode: operation mode
+    :return: P4 operation protobuf object
+    """
+    if mode == WriteOperation.insert:
+        return p4runtime_pb2.Update.INSERT
+    if mode == WriteOperation.update:
+        return p4runtime_pb2.Update.UPDATE
+    if mode == WriteOperation.delete:
+        return p4runtime_pb2.Update.DELETE
+    return None
+
+
+def select_entity_type(entity, update):
+    """
+    Select P4 entity type for an update.
+
+    :param entity: P4 entity object
+    :param update: update operation
+    :return: the correct update entity or None
+    """
+    if isinstance(entity, p4runtime_pb2.TableEntry):
+        return update.entity.table_entry
+    if isinstance(entity, p4runtime_pb2.ActionProfileGroup):
+        return update.entity.action_profile_group
+    if isinstance(entity, p4runtime_pb2.ActionProfileMember):
+        return update.entity.action_profile_member
+    return None
+
+
+class P4RuntimeClient:
+    """
+    P4Runtime client.
+
+    Attributes
+    ----------
+    device_id : int
+        P4 device ID
+    grpc_address : str
+        IP address and port
+    election_id : tuple
+        Mastership election ID
+    role_name : str
+        Role name (optional)
+    ssl_options: tuple
+        SSL options" named tuple (optional)
+    """
+
+    def __init__(self, device_id, grpc_address,
+                 election_id, role_name=None, ssl_options=None):
+        self.device_id = device_id
+        self.election_id = election_id
+        self.role_name = role_name
+        if ssl_options is None:
+            self.ssl_options = SSLOptions(True)
+        else:
+            self.ssl_options = ssl_options
+        LOGGER.debug(
+            "Connecting to device %d at %s", device_id, grpc_address)
+
+        if self.ssl_options.insecure:
+            logging.debug("Using insecure channel")
+            self.channel = grpc.insecure_channel(grpc_address)
+        else:
+            # root certificates are retrieved from a default location
+            # chosen by gRPC runtime unless the user provides
+            # custom certificates.
+            root_certificates = None
+            if self.ssl_options.cacert is not None:
+                root_certificates = read_pem_file(self.ssl_options.cacert)
+            certificate_chain = None
+            if self.ssl_options.cert is not None:
+                certificate_chain = read_pem_file(self.ssl_options.cert)
+            private_key = None
+            if self.ssl_options.key is not None:
+                private_key = read_pem_file(self.ssl_options.key)
+            creds = grpc.ssl_channel_credentials(root_certificates, private_key,
+                                                 certificate_chain)
+            self.channel = grpc.secure_channel(grpc_address, creds)
+        self.stream_in_q = None
+        self.stream_out_q = None
+        self.stream = None
+        self.stream_recv_thread = None
+        self.stub = p4runtime_pb2_grpc.P4RuntimeStub(self.channel)
+
+        try:
+            self.set_up_stream()
+        except P4RuntimeException:
+            LOGGER.critical("Failed to connect to P4Runtime server")
+            sys.exit(1)
+        LOGGER.info("P4Runtime client is successfully invoked")
+
+    def set_up_stream(self):
+        """
+        Set up a gRPC stream.
+        """
+        self.stream_out_q = queue.Queue()
+        # queues for different messages
+        self.stream_in_q = {
+            STREAM_ATTR_ARBITRATION: queue.Queue(),
+            STREAM_ATTR_PACKET: queue.Queue(),
+            STREAM_ATTR_DIGEST: queue.Queue(),
+            STREAM_ATTR_IDLE_NOT: queue.Queue(),
+            STREAM_ATTR_UNKNOWN: queue.Queue(),
+        }
+
+        def stream_req_iterator():
+            while True:
+                stream_p = self.stream_out_q.get()
+                if stream_p is None:
+                    break
+                yield stream_p
+
+        def stream_recv_wrapper(stream):
+            @parse_p4runtime_error
+            def stream_recv():
+                for stream_p in stream:
+                    if stream_p.HasField("arbitration"):
+                        self.stream_in_q["arbitration"].put(stream_p)
+                    elif stream_p.HasField("packet"):
+                        self.stream_in_q["packet"].put(stream_p)
+                    elif stream_p.HasField("digest"):
+                        self.stream_in_q["digest"].put(stream_p)
+                    else:
+                        self.stream_in_q["unknown"].put(stream_p)
+
+            try:
+                stream_recv()
+            except P4RuntimeException as ex:
+                logging.critical("StreamChannel error, closing stream")
+                logging.critical(ex)
+                for k in self.stream_in_q:
+                    self.stream_in_q[k].put(None)
+
+        self.stream = self.stub.StreamChannel(stream_req_iterator())
+        self.stream_recv_thread = threading.Thread(
+            target=stream_recv_wrapper, args=(self.stream,))
+        self.stream_recv_thread.start()
+        self.handshake()
+
+    def handshake(self):
+        """
+        Handshake with gRPC server.
+        """
+
+        req = p4runtime_pb2.StreamMessageRequest()
+        arbitration = req.arbitration
+        arbitration.device_id = self.device_id
+        election_id = arbitration.election_id
+        election_id.high = self.election_id[0]
+        election_id.low = self.election_id[1]
+        if self.role_name is not None:
+            arbitration.role.name = self.role_name
+        self.stream_out_q.put(req)
+
+        rep = self.get_stream_packet(STREAM_ATTR_ARBITRATION, timeout=2)
+        if rep is None:
+            logging.critical("Failed to establish session with server")
+            sys.exit(1)
+        is_primary = (rep.arbitration.status.code == code_pb2.OK)
+        logging.debug("Session established, client is '%s'",
+                      "primary" if is_primary else "backup")
+        if not is_primary:
+            print("You are not the primary client,"
+                  "you only have read access to the server")
+
+    def get_stream_packet(self, type_, timeout=1):
+        """
+        Get a new message from the stream.
+
+        :param type_: stream type.
+        :param timeout: time to wait.
+        :return: message or None
+        """
+        if type_ not in self.stream_in_q:
+            print("Unknown stream type 's"'', type_)
+            return None
+        try:
+            msg = self.stream_in_q[type_].get(timeout=timeout)
+            return msg
+        except queue.Empty:  # timeout expired
+            return None
+
+    @parse_p4runtime_error
+    def get_p4info(self):
+        """
+        Retrieve P4Info content.
+
+        :return: P4Info object.
+        """
+        logging.debug("Retrieving P4Info file")
+        req = p4runtime_pb2.GetForwardingPipelineConfigRequest()
+        req.device_id = self.device_id
+        req.response_type = \
+            p4runtime_pb2.GetForwardingPipelineConfigRequest.P4INFO_AND_COOKIE
+        rep = self.stub.GetForwardingPipelineConfig(req)
+        return rep.config.p4info
+
+    @parse_p4runtime_error
+    def set_fwd_pipe_config(self, p4info_path, bin_path):
+        """
+        Configure the pipeline.
+
+        :param p4info_path: path to the P4Info file
+        :param bin_path: path to the binary file
+        :return:
+        """
+        logging.debug("Setting forwarding pipeline config")
+        req = p4runtime_pb2.SetForwardingPipelineConfigRequest()
+        req.device_id = self.device_id
+        if self.role_name is not None:
+            req.role = self.role_name
+        election_id = req.election_id
+        election_id.high = self.election_id[0]
+        election_id.low = self.election_id[1]
+        req.action = \
+            p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT
+        with open(p4info_path, "r", encoding="utf-8") as f_info:
+            with open(bin_path, "rb") as f_bin:
+                try:
+                    google.protobuf.text_format.Merge(
+                        f_info.read(), req.config.p4info)
+                except google.protobuf.text_format.ParseError:
+                    logging.error("Error when parsing P4Info")
+                    raise
+                req.config.p4_device_config = f_bin.read()
+        return self.stub.SetForwardingPipelineConfig(req)
+
+    def tear_down(self):
+        """
+        Tear connection with the gRPC server down.
+        """
+        if self.stream_out_q:
+            logging.debug("Cleaning up stream")
+            self.stream_out_q.put(None)
+        if self.stream_in_q:
+            for k in self.stream_in_q:
+                self.stream_in_q[k].put(None)
+        if self.stream_recv_thread:
+            self.stream_recv_thread.join()
+        self.channel.close()
+        # avoid a race condition if channel deleted when process terminates
+        del self.channel
+
+    @parse_p4runtime_write_error
+    def __write(self, entity, mode=WriteOperation.insert):
+        """
+        Perform a write operation.
+
+        :param entity: P4 entity to write
+        :param mode: operation mode (defaults to insert)
+        :return: void
+        """
+        if isinstance(entity, (list, tuple)):
+            for ent in entity:
+                self.__write(ent)
+            return
+        req = self.__get_new_write_request()
+        update = req.updates.add()
+        update.type = select_operation(mode)
+        msg_entity = select_entity_type(entity, update)
+        if not msg_entity:
+            msg = f"{mode.name} operation for entity {entity.__name__}" \
+                  f"not supported"
+            raise P4RuntimeWriteException(msg)
+        msg_entity.CopyFrom(entity)
+        self.__simple_write(req)
+
+    def __get_new_write_request(self):
+        """
+        Create a new write request message.
+
+        :return: write request message
+        """
+        req = p4runtime_pb2.WriteRequest()
+        req.device_id = self.device_id
+        if self.role_name is not None:
+            req.role = self.role_name
+        election_id = req.election_id
+        election_id.high = self.election_id[0]
+        election_id.low = self.election_id[1]
+        return req
+
+    @parse_p4runtime_write_error
+    def __simple_write(self, req):
+        """
+        Send a write operation into the wire.
+
+        :param req: write operation request
+        :return: void
+        """
+        try:
+            return self.stub.Write(req)
+        except grpc.RpcError as ex:
+            if ex.code() != grpc.StatusCode.UNKNOWN:
+                raise ex
+            raise P4RuntimeWriteException(ex) from ex
+
+    @parse_p4runtime_write_error
+    def insert(self, entity):
+        """
+        Perform an insert write operation.
+
+        :param entity: P4 entity to insert
+        :return: void
+        """
+        return self.__write(entity, WriteOperation.insert)
+
+    @parse_p4runtime_write_error
+    def update(self, entity):
+        """
+        Perform an update write operation.
+
+        :param entity: P4 entity to update
+        :return: void
+        """
+        return self.__write(entity, WriteOperation.update)
+
+    @parse_p4runtime_write_error
+    def delete(self, entity):
+        """
+        Perform a delete write operation.
+
+        :param entity: P4 entity to delete
+        :return: void
+        """
+        return self.__write(entity, WriteOperation.delete)
+
+    @parse_p4runtime_write_error
+    def write(self, req):
+        """
+        Write device operation.
+
+        :param req: write request message
+        :return: status
+        """
+        req.device_id = self.device_id
+        if self.role_name is not None:
+            req.role = self.role_name
+        election_id = req.election_id
+        election_id.high = self.election_id[0]
+        election_id.low = self.election_id[1]
+        return self.__simple_write(req)
+
+    @parse_p4runtime_write_error
+    def write_update(self, update):
+        """
+        Update device operation.
+
+        :param update: update request message
+        :return: status
+        """
+        req = self.__get_new_write_request()
+        req.updates.extend([update])
+        return self.__simple_write(req)
+
+    # Decorator is useless here: in case of server error,
+    # the exception is raised during the iteration (when next() is called).
+    @parse_p4runtime_error
+    def read_one(self, entity):
+        """
+        Read device operation.
+
+        :param entity: P4 entity for which the read is issued
+        :return: status
+        """
+        req = p4runtime_pb2.ReadRequest()
+        if self.role_name is not None:
+            req.role = self.role_name
+        req.device_id = self.device_id
+        req.entities.extend([entity])
+        return self.stub.Read(req)
+
+    @parse_p4runtime_error
+    def api_version(self):
+        """
+        P4Runtime API version.
+
+        :return: API version hex
+        """
+        req = p4runtime_pb2.CapabilitiesRequest()
+        rep = self.stub.Capabilities(req)
+        return rep.p4runtime_api_version
diff --git a/src/device/service/drivers/p4/p4_common.py b/src/device/service/drivers/p4/p4_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcafedc1f613bfe1d1739d72f89803155b720155
--- /dev/null
+++ b/src/device/service/drivers/p4/p4_common.py
@@ -0,0 +1,445 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This package contains several helper functions for encoding to and decoding from
+byte strings:
+- integers
+- IPv4 address strings
+- IPv6 address strings
+- Ethernet address strings
+as well as static variables used by various P4 driver components.
+"""
+
+import logging
+import math
+import re
+import socket
+import ipaddress
+from ctypes import c_uint16, sizeof
+import macaddress
+
+from common.type_checkers.Checkers import chk_type
+try:
+    from .p4_exception import UserBadValueError
+except ImportError:
+    from p4_exception import UserBadValueError
+
+P4_ATTR_DEV_ID = "id"
+P4_ATTR_DEV_NAME = "name"
+P4_ATTR_DEV_VENDOR = "vendor"
+P4_ATTR_DEV_HW_VER = "hw_ver"
+P4_ATTR_DEV_SW_VER = "sw_ver"
+P4_ATTR_DEV_P4BIN = "p4bin"
+P4_ATTR_DEV_P4INFO = "p4info"
+P4_ATTR_DEV_TIMEOUT = "timeout"
+
+P4_VAL_DEF_VENDOR = "Unknown"
+P4_VAL_DEF_HW_VER = "BMv2 simple_switch"
+P4_VAL_DEF_SW_VER = "Stratum"
+P4_VAL_DEF_TIMEOUT = 60
+
+
+# Logger instance
+LOGGER = logging.getLogger(__name__)
+
+
+# MAC address encoding/decoding
+mac_pattern = re.compile(r"^([\da-fA-F]{2}:){5}([\da-fA-F]{2})$")
+
+
+def matches_mac(mac_addr_string):
+    """
+    Check whether input string is a valid MAC address or not.
+
+    :param mac_addr_string: string-based MAC address
+    :return: boolean status
+    """
+    return mac_pattern.match(mac_addr_string) is not None
+
+
+def encode_mac(mac_addr_string):
+    """
+    Convert string-based MAC address into bytes.
+
+    :param mac_addr_string: string-based MAC address
+    :return: MAC address in bytes
+    """
+    return bytes(macaddress.MAC(mac_addr_string))
+
+
+def decode_mac(encoded_mac_addr):
+    """
+    Convert a MAC address in bytes into string-based MAC address.
+
+    :param encoded_mac_addr: MAC address in bytes
+    :return: string-based MAC address
+    """
+    return str(macaddress.MAC(encoded_mac_addr)).replace("-", ":").lower()
+
+
+# IP address encoding/decoding
+IPV4_LOCALHOST = "localhost"
+
+
+def matches_ipv4(ip_addr_string):
+    """
+    Check whether input string is a valid IPv4 address or not.
+
+    :param ip_addr_string: string-based IPv4 address
+    :return: boolean status
+    """
+    if ip_addr_string == IPV4_LOCALHOST:
+        return True
+    try:
+        addr = ipaddress.ip_address(ip_addr_string)
+        return isinstance(addr, ipaddress.IPv4Address)
+    except ValueError:
+        return False
+
+
+def encode_ipv4(ip_addr_string):
+    """
+    Convert string-based IPv4 address into bytes.
+
+    :param ip_addr_string: string-based IPv4 address
+    :return: IPv4 address in bytes
+    """
+    return socket.inet_aton(ip_addr_string)
+
+
+def decode_ipv4(encoded_ip_addr):
+    """
+    Convert an IPv4 address in bytes into string-based IPv4 address.
+
+    :param encoded_ip_addr: IPv4 address in bytes
+    :return: string-based IPv4 address
+    """
+    return socket.inet_ntoa(encoded_ip_addr)
+
+
+def matches_ipv6(ip_addr_string):
+    """
+    Check whether input string is a valid IPv6 address or not.
+
+    :param ip_addr_string: string-based IPv6 address
+    :return: boolean status
+    """
+    try:
+        addr = ipaddress.ip_address(ip_addr_string)
+        return isinstance(addr, ipaddress.IPv6Address)
+    except ValueError:
+        return False
+
+
+def encode_ipv6(ip_addr_string):
+    """
+    Convert string-based IPv6 address into bytes.
+
+    :param ip_addr_string: string-based IPv6 address
+    :return: IPv6 address in bytes
+    """
+    return socket.inet_pton(socket.AF_INET6, ip_addr_string)
+
+
+def decode_ipv6(encoded_ip_addr):
+    """
+    Convert an IPv6 address in bytes into string-based IPv6 address.
+
+    :param encoded_ip_addr: IPv6 address in bytes
+    :return: string-based IPv6 address
+    """
+    return str(ipaddress.ip_address(encoded_ip_addr))
+
+
+# Numerical encoding/decoding
+
+
+def limits(c_int_type):
+    """
+    Discover limits of numerical type.
+
+    :param c_int_type: numerical type
+    :return: tuple of numerical type's limits
+    """
+    signed = c_int_type(-1).value < c_int_type(0).value
+    bit_size = sizeof(c_int_type) * 8
+    signed_limit = 2 ** (bit_size - 1)
+    return (-signed_limit, signed_limit - 1) \
+        if signed else (0, 2 * signed_limit - 1)
+
+
+def valid_port(port):
+    """
+    Check whether input is a valid port number or not.
+
+    :param port: port number
+    :return: boolean status
+    """
+    lim = limits(c_uint16)
+    return lim[0] <= port <= lim[1]
+
+
+def bitwidth_to_bytes(bitwidth):
+    """
+    Convert number of bits to number of bytes.
+
+    :param bitwidth: number of bits
+    :return: number of bytes
+    """
+    return int(math.ceil(bitwidth / 8.0))
+
+
+def encode_num(number, bitwidth):
+    """
+    Convert number into bytes.
+
+    :param number: number to convert
+    :param bitwidth: number of bits
+    :return: number in bytes
+    """
+    byte_len = bitwidth_to_bytes(bitwidth)
+    return number.to_bytes(byte_len, byteorder="big")
+
+
+def decode_num(encoded_number):
+    """
+    Convert number in bytes into its numerical form.
+
+    :param encoded_number: number in bytes to convert
+    :return: numerical number form
+    """
+    return int.from_bytes(encoded_number, "big")
+
+
+# Umbrella encoder
+
+
+def encode(variable, bitwidth):
+    """
+    Tries to infer the type of `input` and encode it.
+
+    :param variable: target variable
+    :param bitwidth: size of variable in bits
+    :return: encoded bytes
+    """
+    byte_len = bitwidth_to_bytes(bitwidth)
+    if isinstance(variable, (list, tuple)) and len(variable) == 1:
+        variable = variable[0]
+
+    if isinstance(variable, int):
+        encoded_bytes = encode_num(variable, bitwidth)
+    elif isinstance(variable, str):
+        if matches_mac(variable):
+            encoded_bytes = encode_mac(variable)
+        elif matches_ipv4(variable):
+            encoded_bytes = encode_ipv4(variable)
+        elif matches_ipv6(variable):
+            encoded_bytes = encode_ipv6(variable)
+        else:
+            try:
+                value = int(variable, 0)
+            except ValueError as ex:
+                raise UserBadValueError(
+                    f"Invalid value '{variable}': "
+                    "could not cast to integer, try in hex with 0x prefix")\
+                    from ex
+            encoded_bytes = value.to_bytes(byte_len, byteorder="big")
+    else:
+        raise Exception(
+            f"Encoding objects of {type(variable)} is not supported")
+    assert len(encoded_bytes) == byte_len
+    return encoded_bytes
+
+
+# Parsers
+
+
+def get_match_field_value(match_field):
+    """
+    Retrieve the value of a certain match field by name.
+
+    :param match_field: match field
+    :return: match filed value
+    """
+    match_type = match_field.WhichOneof("field_match_type")
+    if match_type == "valid":
+        return match_field.valid.value
+    if match_type == "exact":
+        return match_field.exact.value
+    if match_type == "lpm":
+        return match_field.lpm.value, match_field.lpm.prefix_len
+    if match_type == "ternary":
+        return match_field.ternary.value, match_field.ternary.mask
+    if match_type == "range":
+        return match_field.range.low, match_field.range.high
+    raise Exception(f"Unsupported match type with type {match_type}")
+
+
+def parse_resource_string_from_json(resource, resource_str="table-name"):
+    """
+    Parse a given resource name within a JSON-based object.
+
+    :param resource: JSON-based object
+    :param resource_str: resource string to parse
+    :return: value of the parsed resource string
+    """
+    if not resource or (resource_str not in resource):
+        LOGGER.warning("JSON entry misses '%s' attribute", resource_str)
+        return None
+    chk_type(resource_str, resource[resource_str], str)
+    return resource[resource_str]
+
+
+def parse_resource_number_from_json(resource, resource_nb):
+    """
+    Parse a given resource number within a JSON-based object.
+
+    :param resource: JSON-based object
+    :param resource_nb: resource number to parse
+    :return: value of the parsed resource number
+    """
+    if not resource or (resource_nb not in resource):
+        LOGGER.warning(
+            "JSON entry misses '%s' attribute", resource_nb)
+        return None
+    chk_type(resource_nb, resource[resource_nb], int)
+    return resource[resource_nb]
+
+
+def parse_resource_integer_from_json(resource, resource_nb):
+    """
+    Parse a given integer number within a JSON-based object.
+
+    :param resource: JSON-based object
+    :param resource_nb: resource number to parse
+    :return: value of the parsed resource number
+    """
+    num = parse_resource_number_from_json(resource, resource_nb)
+    if num:
+        return int(num)
+    return -1
+
+
+def parse_resource_float_from_json(resource, resource_nb):
+    """
+    Parse a given floating point number within a JSON-based object.
+
+    :param resource: JSON-based object
+    :param resource_nb: resource number to parse
+    :return: value of the parsed resource number
+    """
+    num = parse_resource_number_from_json(resource, resource_nb)
+    if num:
+        return float(num)
+    return -1.0
+
+
+def parse_resource_bytes_from_json(resource, resource_bytes):
+    """
+    Parse given resource bytes within a JSON-based object.
+
+    :param resource: JSON-based object
+    :param resource_bytes: resource bytes to parse
+    :return: value of the parsed resource bytes
+    """
+    if not resource or (resource_bytes not in resource):
+        LOGGER.debug(
+            "JSON entry misses '%s' attribute", resource_bytes)
+        return None
+
+    if resource_bytes in resource:
+        chk_type(resource_bytes, resource[resource_bytes], bytes)
+        return resource[resource_bytes]
+    return None
+
+
+def parse_match_operations_from_json(resource):
+    """
+    Parse the match operations within a JSON-based object.
+
+    :param resource: JSON-based object
+    :return: map of match operations
+    """
+    if not resource or ("match-fields" not in resource):
+        LOGGER.warning(
+            "JSON entry misses 'match-fields' list of attributes")
+        return {}
+    chk_type("match-fields", resource["match-fields"], list)
+
+    match_map = {}
+    for mf_entry in resource["match-fields"]:
+        if ("match-field" not in mf_entry) or \
+                ("match-value" not in mf_entry):
+            LOGGER.warning(
+                "JSON entry misses 'match-field' and/or "
+                "'match-value' attributes")
+            return None
+        chk_type("match-field", mf_entry["match-field"], str)
+        chk_type("match-value", mf_entry["match-value"], str)
+        match_map[mf_entry["match-field"]] = mf_entry["match-value"]
+
+    return match_map
+
+
+def parse_action_parameters_from_json(resource):
+    """
+    Parse the action parameters within a JSON-based object.
+
+    :param resource: JSON-based object
+    :return: map of action parameters
+    """
+    if not resource or ("action-params" not in resource):
+        LOGGER.warning(
+            "JSON entry misses 'action-params' list of attributes")
+        return None
+    chk_type("action-params", resource["action-params"], list)
+
+    action_name = parse_resource_string_from_json(resource, "action-name")
+
+    action_params = {}
+    for ac_entry in resource["action-params"]:
+        if not ac_entry:
+            LOGGER.debug(
+                "Missing action parameter for action %s", action_name)
+            continue
+        chk_type("action-param", ac_entry["action-param"], str)
+        chk_type("action-value", ac_entry["action-value"], str)
+        action_params[ac_entry["action-param"]] = \
+            ac_entry["action-value"]
+
+    return action_params
+
+
+def parse_integer_list_from_json(resource, resource_list, resource_item):
+    """
+    Parse the list of integers within a JSON-based object.
+
+    :param resource: JSON-based object
+    :param resource_list: name of the resource list
+    :param resource_item: name of the resource item
+    :return: list of integers
+    """
+    if not resource or (resource_list not in resource):
+        LOGGER.warning(
+            "JSON entry misses '%s' list of attributes", resource_list)
+        return []
+    chk_type(resource_list, resource[resource_list], list)
+
+    integers_list = []
+    for item in resource[resource_list]:
+        chk_type(resource_item, item[resource_item], int)
+        integers_list.append(item[resource_item])
+
+    return integers_list
diff --git a/src/device/service/drivers/p4/p4_context.py b/src/device/service/drivers/p4/p4_context.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab01c422fe478cfe26c2f7331fc9b4653521db9f
--- /dev/null
+++ b/src/device/service/drivers/p4/p4_context.py
@@ -0,0 +1,284 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Build some context around a given P4 info file.
+"""
+
+from collections import Counter
+import enum
+from functools import partialmethod
+
+
+@enum.unique
+class P4Type(enum.Enum):
+    """
+    P4 types.
+    """
+    table = 1
+    action = 2
+    action_profile = 3
+    counter = 4
+    direct_counter = 5
+    meter = 6
+    direct_meter = 7
+    controller_packet_metadata = 8
+
+
+P4Type.table.p4info_name = "tables"
+P4Type.action.p4info_name = "actions"
+P4Type.action_profile.p4info_name = "action_profiles"
+P4Type.counter.p4info_name = "counters"
+P4Type.direct_counter.p4info_name = "direct_counters"
+P4Type.meter.p4info_name = "meters"
+P4Type.direct_meter.p4info_name = "direct_meters"
+P4Type.controller_packet_metadata.p4info_name = "controller_packet_metadata"
+
+for object_type in P4Type:
+    object_type.pretty_name = object_type.name.replace('_', ' ')
+    object_type.pretty_names = object_type.pretty_name + 's'
+
+
+@enum.unique
+class P4RuntimeEntity(enum.Enum):
+    """
+    P4 runtime entities.
+    """
+    table_entry = 1
+    action_profile_member = 2
+    action_profile_group = 3
+    meter_entry = 4
+    direct_meter_entry = 5
+    counter_entry = 6
+    direct_counter_entry = 7
+    packet_replication_engine_entry = 8
+
+
+class Context:
+    """
+    P4 context.
+    """
+    def __init__(self):
+        self.p4info = None
+        self.p4info_obj_map = {}
+        self.p4info_obj_map_by_id = {}
+        self.p4info_objs_by_type = {}
+
+    def set_p4info(self, p4info):
+        """
+        Set a p4 info file.
+
+        :param p4info: p4 info file
+        :return: void
+        """
+        self.p4info = p4info
+        self._import_p4info_names()
+
+    def get_obj(self, obj_type, name):
+        """
+        Retrieve an object by type and name.
+
+        :param obj_type: P4 object type
+        :param name: P4 object name
+        :return: P4 object
+        """
+        key = (obj_type, name)
+        return self.p4info_obj_map.get(key, None)
+
+    def get_obj_id(self, obj_type, name):
+        """
+        Retrieve a P4 object's ID by type and name.
+
+        :param obj_type: P4 object type
+        :param name: P4 object name
+        :return: P4 object ID
+        """
+        obj = self.get_obj(obj_type, name)
+        if obj is None:
+            return None
+        return obj.preamble.id
+
+    def get_param(self, action_name, name):
+        """
+        Get an action parameter by action name.
+
+        :param action_name: P4 action name
+        :param name: action parameter name
+        :return: action parameter
+        """
+        action = self.get_obj(P4Type.action, action_name)
+        if action is None:
+            return None
+        for param in action.params:
+            if param.name == name:
+                return param
+        return None
+
+    def get_mf(self, table_name, name):
+        """
+        Get a table's match field by name.
+
+        :param table_name: P4 table name
+        :param name: match field name
+        :return: match field
+        """
+        table = self.get_obj(P4Type.table, table_name)
+        if table is None:
+            return None
+        for match_field in table.match_fields:
+            if match_field.name == name:
+                return match_field
+        return None
+
+    def get_param_id(self, action_name, name):
+        """
+        Get an action parameter ID by the action and parameter names.
+
+        :param action_name: P4 action name
+        :param name: action parameter name
+        :return: action parameter ID
+        """
+        param = self.get_param(action_name, name)
+        return None if param is None else param.id
+
+    def get_mf_id(self, table_name, name):
+        """
+        Get a table's match field ID by name.
+
+        :param table_name: P4 table name
+        :param name: match field name
+        :return: match field ID
+        """
+        match_field = self.get_mf(table_name, name)
+        return None if match_field is None else match_field.id
+
+    def get_param_name(self, action_name, id_):
+        """
+        Get an action parameter name by the action name and action ID.
+
+        :param action_name: P4 action name
+        :param id_: action parameter ID
+        :return: action parameter name
+        """
+        action = self.get_obj(P4Type.action, action_name)
+        if action is None:
+            return None
+        for param in action.params:
+            if param.id == id_:
+                return param.name
+        return None
+
+    def get_mf_name(self, table_name, id_):
+        """
+        Get a table's match field name by ID.
+
+        :param table_name: P4 table name
+        :param id_: match field ID
+        :return: match field name
+        """
+        table = self.get_obj(P4Type.table, table_name)
+        if table is None:
+            return None
+        for match_field in table.match_fields:
+            if match_field.id == id_:
+                return match_field.name
+        return None
+
+    def get_objs(self, obj_type):
+        """
+        Get P4 objects by type.
+
+        :param obj_type: P4 object type
+        :return: list of tuples (object name, object)
+        """
+        objects = self.p4info_objs_by_type[obj_type]
+        for name, obj in objects.items():
+            yield name, obj
+
+    def get_name_from_id(self, id_):
+        """
+        Get P4 object name by its ID.
+
+        :param id_: P4 object ID
+        :return: P4 object name
+        """
+        return self.p4info_obj_map_by_id[id_].preamble.name
+
+    def get_obj_by_id(self, id_):
+        """
+        Get P4 object by its ID.
+
+        :param id_: P4 object ID
+        :return: P4 object
+        """
+        return self.p4info_obj_map_by_id[id_]
+
+    def get_packet_metadata_name_from_id(self, ctrl_pkt_md_name, id_):
+        """
+        Get packet metadata name by ID.
+
+        :param ctrl_pkt_md_name: packet replication entity name
+        :param id_: packet metadata ID
+        :return: packet metadata name
+        """
+        ctrl_pkt_md = self.get_obj(
+            P4Type.controller_packet_metadata, ctrl_pkt_md_name)
+        if not ctrl_pkt_md:
+            return None
+        for meta in ctrl_pkt_md.metadata:
+            if meta.id == id_:
+                return meta.name
+        return None
+
+    # We accept any suffix that uniquely identifies the object
+    # among p4info objects of the same type.
+    def _import_p4info_names(self):
+        """
+        Import p4 info into memory.
+
+        :return: void
+        """
+        suffix_count = Counter()
+        for obj_type in P4Type:
+            self.p4info_objs_by_type[obj_type] = {}
+            for obj in getattr(self.p4info, obj_type.p4info_name):
+                pre = obj.preamble
+                self.p4info_obj_map_by_id[pre.id] = obj
+                self.p4info_objs_by_type[obj_type][pre.name] = obj
+                suffix = None
+                for suf in reversed(pre.name.split(".")):
+                    suffix = suf if suffix is None else suf + "." + suffix
+                    key = (obj_type, suffix)
+                    self.p4info_obj_map[key] = obj
+                    suffix_count[key] += 1
+        for key, cnt in suffix_count.items():
+            if cnt > 1:
+                del self.p4info_obj_map[key]
+
+
+# Add p4info object and object id "getters" for each object type;
+# these are just wrappers around Context.get_obj and Context.get_obj_id.
+# For example: get_table(x) and get_table_id(x) respectively call
+# get_obj(P4Type.table, x) and get_obj_id(P4Type.table, x)
+for object_type in P4Type:
+    object_name = "_".join(["get", object_type.name])
+    setattr(Context, object_name, partialmethod(
+        Context.get_obj, object_type))
+    object_name = "_".join(["get", object_type.name, "id"])
+    setattr(Context, object_name, partialmethod(
+        Context.get_obj_id, object_type))
+
+for object_type in P4Type:
+    object_name = "_".join(["get", object_type.p4info_name])
+    setattr(Context, object_name, partialmethod(Context.get_objs, object_type))
diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py
index af05952b313d1632eacd5962cc34c4aa1b6b5a10..069c07ce40e43192b74519b2175e7e10c638cd20 100644
--- a/src/device/service/drivers/p4/p4_driver.py
+++ b/src/device/service/drivers/p4/p4_driver.py
@@ -16,13 +16,22 @@
 P4 driver plugin for the TeraFlow SDN controller.
 """
 
+import os
+import json
 import logging
 import threading
 from typing import Any, Iterator, List, Optional, Tuple, Union
-from .p4_util import P4RuntimeClient,\
+from common.type_checkers.Checkers import chk_type, chk_length, chk_string
+from .p4_common import matches_ipv4, matches_ipv6, valid_port,\
     P4_ATTR_DEV_ID, P4_ATTR_DEV_NAME, P4_ATTR_DEV_VENDOR,\
-    P4_ATTR_DEV_HW_VER, P4_ATTR_DEV_SW_VER, P4_ATTR_DEV_PIPECONF,\
-    P4_VAL_DEF_VENDOR, P4_VAL_DEF_HW_VER, P4_VAL_DEF_SW_VER, P4_VAL_DEF_PIPECONF
+    P4_ATTR_DEV_HW_VER, P4_ATTR_DEV_SW_VER,\
+    P4_ATTR_DEV_P4BIN, P4_ATTR_DEV_P4INFO, P4_ATTR_DEV_TIMEOUT,\
+    P4_VAL_DEF_VENDOR, P4_VAL_DEF_HW_VER, P4_VAL_DEF_SW_VER,\
+    P4_VAL_DEF_TIMEOUT
+from .p4_manager import P4Manager, get_api_version, KEY_TABLE,\
+    KEY_ACTION_PROFILE, KEY_COUNTER, KEY_DIR_COUNTER, KEY_METER, KEY_DIR_METER,\
+    KEY_CTL_PKT_METADATA
+from .p4_client import WriteOperation
 
 try:
     from _Driver import _Driver
@@ -53,208 +62,543 @@ class P4Driver(_Driver):
             Hardware version of the P4 device (Optional)
         sw_ver : str
             Software version of the P4 device (Optional)
-        pipeconf : str
-            P4 device table configuration (Optional)
+        p4bin : str
+            Path to P4 binary file (Optional, but must be combined with p4info)
+        p4info : str
+            Path to P4 info file (Optional, but must be combined with p4bin)
+        timeout : int
+            Device timeout in seconds (Optional)
     """
 
     def __init__(self, address: str, port: int, **settings) -> None:
         # pylint: disable=super-init-not-called
-        self.__client = None
+        self.__manager = None
         self.__address = address
         self.__port = int(port)
+        self.__endpoint = None
         self.__settings = settings
-
-        try:
-            self.__dev_id = self.__settings.get(P4_ATTR_DEV_ID)
-        except Exception as ex:
-            LOGGER.error('P4 device ID is a mandatory setting')
-            raise Exception from ex
-
-        if P4_ATTR_DEV_NAME in self.__settings:
-            self.__dev_name = self.__settings.get(P4_ATTR_DEV_NAME)
-        else:
-            self.__dev_name = str(self.__dev_id)
-            LOGGER.warning(
-                'No device name is provided. Setting default name: %s',
-                self.__dev_name)
-
-        if P4_ATTR_DEV_VENDOR in self.__settings:
-            self.__dev_vendor = self.__settings.get(P4_ATTR_DEV_VENDOR)
-        else:
-            self.__dev_vendor = P4_VAL_DEF_VENDOR
-            LOGGER.warning(
-                'No vendor is provided. Setting default vendor: %s',
-                self.__dev_vendor)
-
-        if P4_ATTR_DEV_HW_VER in self.__settings:
-            self.__dev_hw_version = self.__settings.get(P4_ATTR_DEV_HW_VER)
-        else:
-            self.__dev_hw_version = P4_VAL_DEF_HW_VER
-            LOGGER.warning(
-                'No HW version is provided. Setting default HW version: %s',
-                self.__dev_hw_version)
-
-        if P4_ATTR_DEV_SW_VER in self.__settings:
-            self.__dev_sw_version = self.__settings.get(P4_ATTR_DEV_SW_VER)
-        else:
-            self.__dev_sw_version = P4_VAL_DEF_SW_VER
-            LOGGER.warning(
-                'No SW version is provided. Setting default SW version: %s',
-                self.__dev_sw_version)
-
-        if P4_ATTR_DEV_PIPECONF in self.__settings:
-            self.__dev_pipeconf = self.__settings.get(P4_ATTR_DEV_PIPECONF)
-        else:
-            self.__dev_pipeconf = P4_VAL_DEF_PIPECONF
-            LOGGER.warning(
-                'No P4 pipeconf is provided. Setting default P4 pipeconf: %s',
-                self.__dev_pipeconf)
-
+        self.__id = None
+        self.__name = None
+        self.__vendor = P4_VAL_DEF_VENDOR
+        self.__hw_version = P4_VAL_DEF_HW_VER
+        self.__sw_version = P4_VAL_DEF_SW_VER
+        self.__p4bin_path = None
+        self.__p4info_path = None
+        self.__timeout = P4_VAL_DEF_TIMEOUT
         self.__lock = threading.Lock()
         self.__started = threading.Event()
         self.__terminate = threading.Event()
 
-        LOGGER.info('Initializing P4 device at %s:%d with settings:',
+        self.__parse_and_validate_settings()
+
+        LOGGER.info("Initializing P4 device at %s:%d with settings:",
                     self.__address, self.__port)
 
         for key, value in settings.items():
-            LOGGER.info('\t%8s = %s', key, value)
+            LOGGER.info("\t%8s = %s", key, value)
 
     def Connect(self) -> bool:
         """
-        Establishes a connection between the P4 device driver and a P4 device.
+        Establish a connection between the P4 device driver and a P4 device.
 
         :return: boolean connection status.
         """
-        LOGGER.info(
-            'Connecting to P4 device %s:%d ...',
-            self.__address, self.__port)
+        LOGGER.info("Connecting to P4 device %s ...", self.__endpoint)
 
         with self.__lock:
             # Skip if already connected
             if self.__started.is_set():
                 return True
 
-            # Instantiate a gRPC channel with the P4 device
-            grpc_address = f'{self.__address}:{self.__port}'
+            # Dynamically devise an election ID
             election_id = (1, 0)
-            self.__client = P4RuntimeClient(
-                self.__dev_id, grpc_address, election_id)
-            LOGGER.info('\tConnected!')
+
+            # Spawn a P4 manager for this device
+            self.__manager = P4Manager(
+                device_id=self.__id,
+                ip_address=self.__address,
+                port=self.__port,
+                election_id=election_id)
+            assert self.__manager
+
+            # Start the P4 manager
+            try:
+                self.__manager.start(self.__p4bin_path, self.__p4info_path)
+            except Exception as ex:  # pylint: disable=broad-except
+                raise Exception(ex) from ex
+
+            LOGGER.info("\tConnected via P4Runtime version %s",
+                        get_api_version())
             self.__started.set()
 
             return True
 
     def Disconnect(self) -> bool:
         """
-        Terminates the connection between the P4 device driver and a P4 device.
+        Terminate the connection between the P4 device driver and a P4 device.
 
         :return: boolean disconnection status.
         """
-        LOGGER.info(
-            'Disconnecting from P4 device %s:%d ...',
-            self.__address, self.__port)
+        LOGGER.info("Disconnecting from P4 device %s ...", self.__endpoint)
 
         # If not started, assume it is already disconnected
         if not self.__started.is_set():
             return True
 
-        # gRPC client must already be instantiated
-        assert self.__client
+        # P4 manager must already be instantiated
+        assert self.__manager
 
         # Trigger termination of loops and processes
         self.__terminate.set()
 
         # Trigger connection tear down with the P4Runtime server
-        self.__client.tear_down()
-        self.__client = None
+        self.__manager.stop()
+        self.__manager = None
 
-        LOGGER.info('\tDisconnected!')
+        LOGGER.info("\tDisconnected!")
 
         return True
 
     def GetInitialConfig(self) -> List[Tuple[str, Any]]:
         """
-        Retrieves the initial configuration of a P4 device.
+        Retrieve the initial configuration of a P4 device.
 
         :return: list of initial configuration items.
         """
-        LOGGER.info('P4 GetInitialConfig()')
-        return []
+        initial_conf = []
 
-    def GetConfig(self, resource_keys : List[str] = [])\
+        with self.__lock:
+            if not initial_conf:
+                LOGGER.warning("No initial configuration for P4 device %s ...",
+                               self.__endpoint)
+            return []
+
+    def GetConfig(self, resource_keys: List[str] = [])\
             -> List[Tuple[str, Union[Any, None, Exception]]]:
         """
-        Retrieves the current configuration of a P4 device.
+        Retrieve the current configuration of a P4 device.
 
-        :param resource_keys: configuration parameters to retrieve.
-        :return: list of values associated with the requested resource keys.
+        :param resource_keys: P4 resource keys to retrieve.
+        :return: list of values associated with the requested resource keys or
+        None/Exception.
         """
+        LOGGER.info(
+            "Getting configuration from P4 device %s ...", self.__endpoint)
 
-        LOGGER.info('P4 GetConfig()')
-        return []
+        # No resource keys means fetch all configuration
+        if len(resource_keys) == 0:
+            LOGGER.warning(
+                "GetConfig with no resource keys "
+                "implies getting all resource keys!")
+            resource_keys = [
+                obj_name for obj_name, _ in self.__manager.p4_objects.items()
+            ]
+
+        # Verify the input type
+        chk_type("resources", resource_keys, list)
+
+        with self.__lock:
+            return self.__get_resources(resource_keys)
 
-    def SetConfig(self, resources : List[Tuple[str, Any]])\
+    def SetConfig(self, resources: List[Tuple[str, Any]])\
             -> List[Union[bool, Exception]]:
         """
-        Submits a new configuration to a P4 device.
+        Submit a new configuration to a P4 device.
 
-        :param resources: configuration parameters to set.
-        :return: list of results for resource key changes requested.
+        :param resources: P4 resources to set.
+        :return: list of boolean results or Exceptions for resource key
+        changes requested.
         """
-        LOGGER.info('P4 SetConfig()')
-        return []
+        LOGGER.info(
+            "Setting configuration to P4 device %s ...", self.__endpoint)
 
-    def DeleteConfig(self, resources : List[Tuple[str, Any]])\
+        if not resources or len(resources) == 0:
+            LOGGER.warning(
+                "SetConfig requires a list of resources to store "
+                "into the device. Nothing is provided though.")
+            return []
+
+        assert isinstance(resources, list)
+
+        with self.__lock:
+            return self.__set_resources(resources)
+
+    def DeleteConfig(self, resources: List[Tuple[str, Any]])\
             -> List[Union[bool, Exception]]:
         """
-        Revokes P4 device configuration.
+        Revoke P4 device configuration.
 
         :param resources: list of tuples with resource keys to be deleted.
-        :return: list of results for resource key deletions requested.
+        :return: list of boolean results or Exceptions for resource key
+        deletions requested.
         """
-        LOGGER.info('P4 DeleteConfig()')
-        return []
+        LOGGER.info(
+            "Deleting configuration from P4 device %s ...", self.__endpoint)
+
+        if not resources or len(resources) == 0:
+            LOGGER.warning(
+                "DeleteConfig requires a list of resources to delete "
+                "from the device. Nothing is provided though.")
+            return []
 
-    def GetResource(self, endpoint_uuid : str) -> Optional[str]:
+        with self.__lock:
+            return self.__delete_resources(resources)
+
+    def GetResource(self, endpoint_uuid: str) -> Optional[str]:
         """
-        Retrieves a certain resource from a P4 device.
+        Retrieve a certain resource from a P4 device.
 
         :param endpoint_uuid: target endpoint UUID.
         :return: The path of the endpoint or None if not found.
         """
-        LOGGER.info('P4 GetResource()')
+        LOGGER.warning("GetResource() RPC not yet implemented by the P4 driver")
         return ""
 
-    def GetState(self, blocking=False, terminate : Optional[threading.Event] = None) -> Iterator[Tuple[str, Any]]:
+    def GetState(self,
+                 blocking=False,
+                 terminate: Optional[threading.Event] = None) -> \
+                 Iterator[Tuple[str, Any]]:
         """
-        Retrieves the state of a P4 device.
+        Retrieve the state of a P4 device.
 
         :param blocking: if non-blocking, the driver terminates the loop and
         returns.
+        :param terminate: termination flag.
         :return: sequences of state sample.
         """
-        LOGGER.info('P4 GetState()')
+        LOGGER.warning("GetState() RPC not yet implemented by the P4 driver")
         return []
 
-    def SubscribeState(self, subscriptions : List[Tuple[str, float, float]])\
+    def SubscribeState(self, subscriptions: List[Tuple[str, float, float]])\
             -> List[Union[bool, Exception]]:
         """
-        Subscribes to certain state information.
+        Subscribe to certain state information.
 
         :param subscriptions: list of tuples with resources to be subscribed.
         :return: list of results for resource subscriptions requested.
         """
-        LOGGER.info('P4 SubscribeState()')
-        return []
+        LOGGER.warning(
+            "SubscribeState() RPC not yet implemented by the P4 driver")
+        return [False for _ in subscriptions]
 
-    def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]])\
+    def UnsubscribeState(self, subscriptions: List[Tuple[str, float, float]])\
             -> List[Union[bool, Exception]]:
         """
-        Unsubscribes from certain state information.
+        Unsubscribe from certain state information.
 
         :param subscriptions: list of tuples with resources to be unsubscribed.
         :return: list of results for resource un-subscriptions requested.
         """
-        LOGGER.info('P4 UnsubscribeState()')
-        return []
+        LOGGER.warning(
+            "UnsubscribeState() RPC not yet implemented by the P4 driver")
+        return [False for _ in subscriptions]
+
+    def get_manager(self):
+        """
+        Get an instance of the P4 manager.
+
+        :return: P4 manager instance
+        """
+        return self.__manager
+
+    def __parse_and_validate_settings(self):
+        """
+        Verify that the driver inputs comply to what is expected.
+
+        :return: void or exception in case of validation error
+        """
+        # Device endpoint information
+        assert matches_ipv4(self.__address) or (matches_ipv6(self.__address)),\
+            f"{self.__address} not a valid IPv4 or IPv6 address"
+        assert valid_port(self.__port), \
+            f"{self.__port} not a valid transport port"
+        self.__endpoint = f"{self.__address}:{self.__port}"
+
+        # Device ID
+        try:
+            self.__id = self.__settings.get(P4_ATTR_DEV_ID)
+        except Exception as ex:
+            LOGGER.error("P4 device ID is a mandatory setting")
+            raise Exception from ex
+
+        # Device name
+        if P4_ATTR_DEV_NAME in self.__settings:
+            self.__name = self.__settings.get(P4_ATTR_DEV_NAME)
+        else:
+            self.__name = str(self.__id)
+            LOGGER.warning(
+                "No device name is provided. Setting default name: %s",
+                self.__name)
+
+        # Device vendor
+        if P4_ATTR_DEV_VENDOR in self.__settings:
+            self.__vendor = self.__settings.get(P4_ATTR_DEV_VENDOR)
+        else:
+            LOGGER.warning(
+                "No device vendor is provided. Setting default vendor: %s",
+                self.__vendor)
+
+        # Device hardware version
+        if P4_ATTR_DEV_HW_VER in self.__settings:
+            self.__hw_version = self.__settings.get(P4_ATTR_DEV_HW_VER)
+        else:
+            LOGGER.warning(
+                "No HW version is provided. Setting default HW version: %s",
+                self.__hw_version)
+
+        # Device software version
+        if P4_ATTR_DEV_SW_VER in self.__settings:
+            self.__sw_version = self.__settings.get(P4_ATTR_DEV_SW_VER)
+        else:
+            LOGGER.warning(
+                "No SW version is provided. Setting default SW version: %s",
+                self.__sw_version)
+
+        # Path to P4 binary file
+        if P4_ATTR_DEV_P4BIN in self.__settings:
+            self.__p4bin_path = self.__settings.get(P4_ATTR_DEV_P4BIN)
+            assert os.path.exists(self.__p4bin_path),\
+                "Invalid path to p4bin file"
+            assert P4_ATTR_DEV_P4INFO in self.__settings,\
+                "p4info and p4bin settings must be provided together"
+
+        # Path to P4 info file
+        if P4_ATTR_DEV_P4INFO in self.__settings:
+            self.__p4info_path = self.__settings.get(P4_ATTR_DEV_P4INFO)
+            assert os.path.exists(self.__p4info_path),\
+                "Invalid path to p4info file"
+            assert P4_ATTR_DEV_P4BIN in self.__settings,\
+                "p4info and p4bin settings must be provided together"
+
+        if (not self.__p4bin_path) or (not self.__p4info_path):
+            LOGGER.warning(
+                "No P4 binary and info files are provided, hence "
+                "no pipeline will be installed on the whitebox device.\n"
+                "This driver will attempt to manage whatever pipeline "
+                "is available on the target device.")
+
+        # Device timeout
+        if P4_ATTR_DEV_TIMEOUT in self.__settings:
+            self.__timeout = self.__settings.get(P4_ATTR_DEV_TIMEOUT)
+            assert self.__timeout > 0,\
+                "Device timeout must be a positive integer"
+        else:
+            LOGGER.warning(
+                "No device timeout is provided. Setting default timeout: %s",
+                self.__timeout)
+
+    def __get_resources(self, resource_keys):
+        """
+        Retrieve the current configuration of a P4 device.
+
+        :param resource_keys: P4 resource keys to retrieve.
+        :return: list of values associated with the requested resource keys or
+        None/Exception.
+        """
+        resources = []
+
+        LOGGER.debug("GetConfig() -> Keys: %s", resource_keys)
+
+        for resource_key in resource_keys:
+            entries = []
+            try:
+                if KEY_TABLE == resource_key:
+                    for table_name in self.__manager.get_table_names():
+                        t_entries = self.__manager.table_entries_to_json(
+                            table_name)
+                        if t_entries:
+                            entries.append(t_entries)
+                elif KEY_COUNTER == resource_key:
+                    for cnt_name in self.__manager.get_counter_names():
+                        c_entries = self.__manager.counter_entries_to_json(
+                            cnt_name)
+                        if c_entries:
+                            entries.append(c_entries)
+                elif KEY_DIR_COUNTER == resource_key:
+                    for d_cnt_name in self.__manager.get_direct_counter_names():
+                        dc_entries = \
+                            self.__manager.direct_counter_entries_to_json(
+                                d_cnt_name)
+                        if dc_entries:
+                            entries.append(dc_entries)
+                elif KEY_METER == resource_key:
+                    for meter_name in self.__manager.get_meter_names():
+                        m_entries = self.__manager.meter_entries_to_json(
+                            meter_name)
+                        if m_entries:
+                            entries.append(m_entries)
+                elif KEY_DIR_METER == resource_key:
+                    for d_meter_name in self.__manager.get_direct_meter_names():
+                        dm_entries = \
+                            self.__manager.direct_meter_entries_to_json(
+                                d_meter_name)
+                        if dm_entries:
+                            entries.append(dm_entries)
+                elif KEY_ACTION_PROFILE == resource_key:
+                    for ap_name in self.__manager.get_action_profile_names():
+                        ap_entries = \
+                            self.__manager.action_prof_member_entries_to_json(
+                                ap_name)
+                        if ap_entries:
+                            entries.append(ap_entries)
+                elif KEY_CTL_PKT_METADATA == resource_key:
+                    msg = f"{resource_key.capitalize()} is not a " \
+                          f"retrievable resource"
+                    raise Exception(msg)
+                else:
+                    msg = f"GetConfig failed due to invalid " \
+                          f"resource key: {resource_key}"
+                    raise Exception(msg)
+                resources.append(
+                    (resource_key, entries if entries else None)
+                )
+            except Exception as ex:  # pylint: disable=broad-except
+                resources.append((resource_key, ex))
+
+        return resources
+
+    def __set_resources(self, resources):
+        """
+        Submit a new configuration to a P4 device.
+
+        :param resources: P4 resources to set.
+        :return: list of boolean results or Exceptions for resource key
+        changes requested.
+        """
+        results = []
+
+        for i, resource in enumerate(resources):
+            str_resource_name = f"resources[#{i}]"
+            resource_key = ""
+            try:
+                chk_type(
+                    str_resource_name, resource, (list, tuple))
+                chk_length(
+                    str_resource_name, resource, min_length=2, max_length=2)
+                resource_key, resource_value = resource
+                chk_string(
+                    str_resource_name, resource_key, allow_empty=False)
+            except Exception as e:  # pylint: disable=broad-except
+                LOGGER.exception(
+                    "Exception validating %s: %s",
+                    str_resource_name, str(resource_key))
+                results.append(e)  # store the exception if validation fails
+                continue
+
+            try:
+                resource_value = json.loads(resource_value)
+            except Exception:  # pylint: disable=broad-except
+                pass
+
+            LOGGER.debug(
+                "SetConfig() -> Key: %s - Value: %s",
+                resource_key, resource_value)
+
+            # Default operation is insert.
+            # P4 manager has internal logic to judge whether an entry
+            # to be inserted already exists, thus simply needs an update.
+            operation = WriteOperation.insert
+
+            try:
+                self.__apply_operation(resource_key, resource_value, operation)
+                results.append(True)
+            except Exception as ex:  # pylint: disable=broad-except
+                results.append(ex)
+
+        print(results)
+
+        return results
+
+    def __delete_resources(self, resources):
+        """
+        Revoke P4 device configuration.
+
+        :param resources: list of tuples with resource keys to be deleted.
+        :return: list of boolean results or Exceptions for resource key
+        deletions requested.
+        """
+        results = []
+
+        for i, resource in enumerate(resources):
+            str_resource_name = f"resources[#{i}]"
+            resource_key = ""
+            try:
+                chk_type(
+                    str_resource_name, resource, (list, tuple))
+                chk_length(
+                    str_resource_name, resource, min_length=2, max_length=2)
+                resource_key, resource_value = resource
+                chk_string(
+                    str_resource_name, resource_key, allow_empty=False)
+            except Exception as e:  # pylint: disable=broad-except
+                LOGGER.exception(
+                    "Exception validating %s: %s",
+                    str_resource_name, str(resource_key))
+                results.append(e)  # store the exception if validation fails
+                continue
+
+            try:
+                resource_value = json.loads(resource_value)
+            except Exception:  # pylint: disable=broad-except
+                pass
+
+            LOGGER.debug("DeleteConfig() -> Key: %s - Value: %s",
+                         resource_key, resource_value)
+
+            operation = WriteOperation.delete
+
+            try:
+                self.__apply_operation(resource_key, resource_value, operation)
+                results.append(True)
+            except Exception as ex:  # pylint: disable=broad-except
+                results.append(ex)
+
+        print(results)
+
+        return results
+
+    def __apply_operation(
+            self, resource_key, resource_value, operation: WriteOperation):
+        """
+        Apply a write operation to a P4 resource.
+
+        :param resource_key: P4 resource key
+        :param resource_value: P4 resource value in JSON format
+        :param operation: write operation (i.e., insert, update, delete)
+        to apply
+        :return: True if operation is successfully applied or raise Exception
+        """
+
+        # Apply settings to the various tables
+        if KEY_TABLE == resource_key:
+            self.__manager.table_entry_operation_from_json(
+                resource_value, operation)
+        elif KEY_COUNTER == resource_key:
+            self.__manager.counter_entry_operation_from_json(
+                resource_value, operation)
+        elif KEY_DIR_COUNTER == resource_key:
+            self.__manager.direct_counter_entry_operation_from_json(
+                resource_value, operation)
+        elif KEY_METER == resource_key:
+            self.__manager.meter_entry_operation_from_json(
+                resource_value, operation)
+        elif KEY_DIR_METER == resource_key:
+            self.__manager.direct_meter_entry_operation_from_json(
+                resource_value, operation)
+        elif KEY_ACTION_PROFILE == resource_key:
+            self.__manager.action_prof_member_entry_operation_from_json(
+                resource_value, operation)
+            self.__manager.action_prof_group_entry_operation_from_json(
+                resource_value, operation)
+        elif KEY_CTL_PKT_METADATA == resource_key:
+            msg = f"{resource_key.capitalize()} is not a " \
+                  f"configurable resource"
+            raise Exception(msg)
+        else:
+            msg = f"{operation} on invalid key {resource_key}"
+            LOGGER.error(msg)
+            raise Exception(msg)
+
+        LOGGER.debug("%s operation: %s", resource_key.capitalize(), operation)
+
+        return True
diff --git a/src/device/service/drivers/p4/p4_exception.py b/src/device/service/drivers/p4/p4_exception.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e3afb723b3850fd9a9b2b1c4982bf8ae31b20f7
--- /dev/null
+++ b/src/device/service/drivers/p4/p4_exception.py
@@ -0,0 +1,135 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+P4 driver exceptions.
+"""
+
+
+class UserError(Exception):
+    """
+    User error exception.
+    """
+    def __init__(self, info=""):
+        super().__init__()
+        self.info = info
+
+    def __str__(self):
+        return self.info
+
+    # TODO: find better way to get a custom traceback  # pylint: disable=W0511
+    def _render_traceback_(self):
+        return [str(self)]
+
+
+class InvalidP4InfoError(Exception):
+    """
+    Invalid P4 info exception.
+    """
+    def __init__(self, info=""):
+        super().__init__()
+        self.info = info
+
+    def __str__(self):
+        return f"Invalid P4Info message: {self.info}"
+
+    def _render_traceback_(self):
+        return [str(self)]
+
+
+class UnknownOptionName(UserError):
+    """
+    Unknown option name exception.
+    """
+    def __init__(self, option_name):
+        super().__init__()
+        self.option_name = option_name
+
+    def __str__(self):
+        return f"Unknown option name: {self.option_name}"
+
+
+class InvalidOptionValueType(UserError):
+    """
+    Invalid option value type exception.
+    """
+    def __init__(self, option, value):
+        super().__init__()
+        self.option = option
+        self.value = value
+
+    def __str__(self):
+        return f"Invalid value type for option {self.option.name}. "\
+               "Expected {self.option.value.__name__} but got "\
+               "value {self.value} with type {type(self.value).__name__}"
+
+
+class UserBadIPv4Error(UserError):
+    """
+    Invalid IPv4 address value exception.
+    """
+    def __init__(self, addr):
+        super().__init__()
+        self.addr = addr
+
+    def __str__(self):
+        return f"{self.addr}' is not a valid IPv4 address"
+
+    def _render_traceback_(self):
+        return [str(self)]
+
+
+class UserBadIPv6Error(UserError):
+    """
+    Invalid IPv6 address value exception.
+    """
+    def __init__(self, addr):
+        super().__init__()
+        self.addr = addr
+
+    def __str__(self):
+        return f"'{self.addr}' is not a valid IPv6 address"
+
+    def _render_traceback_(self):
+        return [str(self)]
+
+
+class UserBadMacError(UserError):
+    """
+    Invalid MAC address value exception.
+    """
+    def __init__(self, addr):
+        super().__init__()
+        self.addr = addr
+
+    def __str__(self):
+        return f"'{self.addr}' is not a valid MAC address"
+
+    def _render_traceback_(self):
+        return [str(self)]
+
+
+class UserBadValueError(UserError):
+    """
+    Invalid value exception.
+    """
+    def __init__(self, info=""):
+        super().__init__()
+        self.info = info
+
+    def __str__(self):
+        return self.info
+
+    def _render_traceback_(self):
+        return [str(self)]
diff --git a/src/device/service/drivers/p4/p4_global_options.py b/src/device/service/drivers/p4/p4_global_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..86043b671e9316dfeff2fb12db8ab3088386382a
--- /dev/null
+++ b/src/device/service/drivers/p4/p4_global_options.py
@@ -0,0 +1,204 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+P4Runtime global options.
+"""
+
+import enum
+try:
+    from .p4_exception import UnknownOptionName, InvalidOptionValueType
+except ImportError:
+    from p4_exception import UnknownOptionName, InvalidOptionValueType
+
+
+@enum.unique
+class Options(enum.Enum):
+    """
+    P4 options.
+    """
+    canonical_bytestrings = bool
+
+
+class GlobalOptions:
+    """
+    P4 global options.
+    """
+    option_defaults = {
+        Options.canonical_bytestrings: True,
+    }
+
+    option_helpstrings = {
+        Options.canonical_bytestrings: """
+Use byte-padded legacy format for binary strings sent to the P4Runtime server,
+instead of the canonical representation. See P4Runtime specification for details.
+"""
+    }
+
+    def __init__(self):
+        self._values = {}
+        self.reset()
+        self._option_names = [option.name for option in Options]
+        self._set_docstring()
+
+    def reset(self):
+        """
+        Reset all options to their defaults.
+
+        :return: void
+        """
+        for option in Options:
+            assert option in GlobalOptions.option_defaults
+            self._values[option] = GlobalOptions.option_defaults[option]
+
+    def _supported_options_as_str(self):
+        """
+        Return a comma-separated string of supported options.
+
+        :return: string of supported options
+        """
+        return ", ".join([f"{o.name} ({o.value.__name__})" for o in Options])
+
+    def _supported_options_as_str_verbose(self):
+        """
+        Return a detailed comma-separated string of supported options.
+
+        :return: string of supported options
+        """
+        opt_str = ""
+        for option in Options:
+            opt_str += f"Option name: {option.name}\n"
+            opt_str += f"Type: {option.value.__name__}\n"
+            opt_str += f"Default value: " \
+                       f"{GlobalOptions.option_defaults[option]}\n"
+            opt_str += f"Description: " \
+                   f"{GlobalOptions.option_helpstrings.get(option, 'N/A')}\n"
+            opt_str += "\n"
+        return opt_str[:-1]
+
+    def _set_docstring(self):
+        """
+        Set the documentation for this object.
+
+        :return: void
+        """
+        self.__doc__ = f"""
+Manage global options for the P4Runtime shell.
+Supported options are: {self._supported_options_as_str()}
+To set the value of a global option, use GLOBAL_OPTIONS["<option name>"] = <option value>
+To access the current value of a global option, use GLOBAL_OPTIONS.["<option name>"]
+To reset all options to their default value, use GLOBAL_OPTIONS.reset
+
+{self._supported_options_as_str_verbose()}
+"""
+
+    def __dir__(self):
+        """
+        Return all names in this scope.
+
+        :return: list of names in scope
+        """
+        return ["reset", "set", "get"]
+
+    def set_option(self, option, value):
+        """
+        Set an option's value.
+
+        :param option: option to set
+        :param value: option value
+        :return: void
+        """
+        self._values[option] = value
+
+    def get_option(self, option):
+        """
+        Get an option's value.
+
+        :param option: option to get
+        :return: option value
+        """
+        return self._values[option]
+
+    def set(self, name, value):
+        """
+        Create an option and set its value.
+
+        :param name: option name
+        :param value: option value
+        :return: void
+        """
+        try:
+            option = Options[name]
+        except KeyError as ex:
+            raise UnknownOptionName(name) from ex
+        if not isinstance(value, option.value):
+            raise InvalidOptionValueType(option, value)
+        self.set_option(option, value)
+
+    def get(self, name):
+        """
+        Get option by name.
+
+        :param name: option name
+        :return: option
+        """
+        try:
+            option = Options[name]
+        except KeyError as ex:
+            raise UnknownOptionName(name) from ex
+        return self.get_option(option)
+
+    def __setitem__(self, name, value):
+        self.set(name, value)
+
+    def __getitem__(self, name):
+        return self.get(name)
+
+    def __str__(self):
+        return '\n'.join([f"{o.name}: {v}" for o, v in self._values.items()])
+
+
+GLOBAL_OPTIONS = GlobalOptions()
+
+
+def to_canonical_bytes(bytes_):
+    """
+    Convert to canonical bytes.
+
+    :param bytes_: byte stream
+    :return: canonical bytes
+    """
+    if len(bytes_) == 0:
+        return bytes_
+    num_zeros = 0
+    for byte in bytes_:
+        if byte != 0:
+            break
+        num_zeros += 1
+    if num_zeros == len(bytes_):
+        return bytes_[:1]
+    return bytes_[num_zeros:]
+
+
+def make_canonical_if_option_set(bytes_):
+    """
+    Convert to canonical bytes if option is set.
+
+    :param bytes_: byte stream
+    :return: canonical bytes
+    """
+
+    if GLOBAL_OPTIONS.get_option(Options.canonical_bytestrings):
+        return to_canonical_bytes(bytes_)
+    return bytes_
diff --git a/src/device/service/drivers/p4/p4_manager.py b/src/device/service/drivers/p4/p4_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc25e80b5803bfdec7d802d41c136865f4c045e3
--- /dev/null
+++ b/src/device/service/drivers/p4/p4_manager.py
@@ -0,0 +1,5987 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+P4Runtime manager.
+"""
+
+import enum
+import os
+import queue
+import time
+import logging
+from collections import Counter, OrderedDict
+from threading import Thread
+from tabulate import tabulate
+from p4.v1 import p4runtime_pb2
+from p4.config.v1 import p4info_pb2
+
+try:
+    from .p4_client import P4RuntimeClient, P4RuntimeException,\
+        P4RuntimeWriteException, WriteOperation, parse_p4runtime_error
+    from .p4_context import P4RuntimeEntity, P4Type, Context
+    from .p4_global_options import make_canonical_if_option_set
+    from .p4_common import encode,\
+        parse_resource_string_from_json, parse_resource_integer_from_json,\
+        parse_resource_bytes_from_json, parse_match_operations_from_json,\
+        parse_action_parameters_from_json, parse_integer_list_from_json
+    from .p4_exception import UserError, InvalidP4InfoError
+except ImportError:
+    from p4_client import P4RuntimeClient, P4RuntimeException,\
+        P4RuntimeWriteException, WriteOperation, parse_p4runtime_error
+    from p4_context import P4RuntimeEntity, P4Type, Context
+    from p4_global_options import make_canonical_if_option_set
+    from p4_common import encode,\
+        parse_resource_string_from_json, parse_resource_integer_from_json,\
+        parse_resource_bytes_from_json, parse_match_operations_from_json,\
+        parse_action_parameters_from_json, parse_integer_list_from_json
+    from p4_exception import UserError, InvalidP4InfoError
+
+# Logger instance
+LOGGER = logging.getLogger(__name__)
+
+# Global P4Runtime context
+CONTEXT = Context()
+
+# Global P4Runtime client
+CLIENT = None
+
+# Constant P4 entities
+KEY_TABLE = "table"
+KEY_ACTION = "action"
+KEY_ACTION_PROFILE = "action_profile"
+KEY_COUNTER = "counter"
+KEY_DIR_COUNTER = "direct_counter"
+KEY_METER = "meter"
+KEY_DIR_METER = "direct_meter"
+KEY_CTL_PKT_METADATA = "controller_packet_metadata"
+
+
+def get_context():
+    """
+    Return P4 context.
+
+    :return: context object
+    """
+    return CONTEXT
+
+
+def get_client():
+    """
+    Return P4 client.
+
+    :return: P4Runtime client object
+    """
+    return CLIENT
+
+
+def get_api_version():
+    """
+    Get the supported P4Runtime API version.
+
+    :return: API version
+    """
+    return CLIENT.api_version()
+
+
+def get_table_type(table):
+    """
+    Assess the type of P4 table based upon the matching scheme.
+
+    :param table: P4 table
+    :return: P4 table type
+    """
+    for m_f in table.match_fields:
+        if m_f.match_type == p4info_pb2.MatchField.EXACT:
+            return p4info_pb2.MatchField.EXACT
+        if m_f.match_type == p4info_pb2.MatchField.LPM:
+            return p4info_pb2.MatchField.LPM
+        if m_f.match_type == p4info_pb2.MatchField.TERNARY:
+            return p4info_pb2.MatchField.TERNARY
+        if m_f.match_type == p4info_pb2.MatchField.RANGE:
+            return p4info_pb2.MatchField.RANGE
+        if m_f.match_type == p4info_pb2.MatchField.OPTIONAL:
+            return p4info_pb2.MatchField.OPTIONAL
+    return None
+
+
+def match_type_to_str(match_type):
+    """
+    Convert table match type to string.
+
+    :param match_type: table match type object
+    :return: table match type string
+    """
+    if match_type == p4info_pb2.MatchField.EXACT:
+        return "Exact"
+    if match_type == p4info_pb2.MatchField.LPM:
+        return "LPM"
+    if match_type == p4info_pb2.MatchField.TERNARY:
+        return "Ternary"
+    if match_type == p4info_pb2.MatchField.RANGE:
+        return "Range"
+    if match_type == p4info_pb2.MatchField.OPTIONAL:
+        return "Optional"
+    return None
+
+
+def insert_table_entry_exact(
+        table_name, match_map, action_name, action_params, metadata,
+        cnt_pkt=-1, cnt_byte=-1):
+    """
+    Insert an entry into an exact match table.
+
+    :param table_name: P4 table name
+    :param match_map: Map of match operations
+    :param action_name: Action name
+    :param action_params: Map of action parameters
+    :param metadata: table metadata
+    :param cnt_pkt: packet count
+    :param cnt_byte: byte count
+    :return: inserted entry
+    """
+    assert match_map, "Table entry without match operations is not accepted"
+    assert action_name, "Table entry without action is not accepted"
+
+    table_entry = TableEntry(table_name)(action=action_name)
+
+    for match_k, match_v in match_map.items():
+        table_entry.match[match_k] = match_v
+
+    for action_k, action_v in action_params.items():
+        table_entry.action[action_k] = action_v
+
+    if metadata:
+        table_entry.metadata = metadata
+
+    if cnt_pkt > 0:
+        table_entry.counter_data.packet_count = cnt_pkt
+
+    if cnt_byte > 0:
+        table_entry.counter_data.byte_count = cnt_byte
+
+    ex_msg = ""
+    try:
+        table_entry.insert()
+        LOGGER.info("Inserted exact table entry: %s", table_entry)
+    except P4RuntimeWriteException as ex:
+        ex_msg = str(ex)
+    except P4RuntimeException as ex:
+        raise P4RuntimeException from ex
+
+    # Table entry exists, needs to be modified
+    if "ALREADY_EXISTS" in ex_msg:
+        table_entry.modify()
+        LOGGER.info("Updated exact table entry: %s", table_entry)
+
+    return table_entry
+
+
+def insert_table_entry_ternary(
+        table_name, match_map, action_name, action_params, metadata,
+        priority, cnt_pkt=-1, cnt_byte=-1):
+    """
+    Insert an entry into a ternary match table.
+
+    :param table_name: P4 table name
+    :param match_map: Map of match operations
+    :param action_name: Action name
+    :param action_params: Map of action parameters
+    :param metadata: table metadata
+    :param priority: entry priority
+    :param cnt_pkt: packet count
+    :param cnt_byte: byte count
+    :return: inserted entry
+    """
+    assert match_map, "Table entry without match operations is not accepted"
+    assert action_name, "Table entry without action is not accepted"
+
+    table_entry = TableEntry(table_name)(action=action_name)
+
+    for match_k, match_v in match_map.items():
+        table_entry.match[match_k] = match_v
+
+    for action_k, action_v in action_params.items():
+        table_entry.action[action_k] = action_v
+
+    table_entry.priority = priority
+
+    if metadata:
+        table_entry.metadata = metadata
+
+    if cnt_pkt > 0:
+        table_entry.counter_data.packet_count = cnt_pkt
+
+    if cnt_byte > 0:
+        table_entry.counter_data.byte_count = cnt_byte
+
+    ex_msg = ""
+    try:
+        table_entry.insert()
+        LOGGER.info("Inserted ternary table entry: %s", table_entry)
+    except P4RuntimeWriteException as ex:
+        ex_msg = str(ex)
+    except P4RuntimeException as ex:
+        raise P4RuntimeException from ex
+
+    # Table entry exists, needs to be modified
+    if "ALREADY_EXISTS" in ex_msg:
+        table_entry.modify()
+        LOGGER.info("Updated ternary table entry: %s", table_entry)
+
+    return table_entry
+
+
+def insert_table_entry_range(
+        table_name, match_map, action_name, action_params, metadata,
+        priority, cnt_pkt=-1, cnt_byte=-1):  # pylint: disable=unused-argument
+    """
+    Insert an entry into a range match table.
+
+    :param table_name: P4 table name
+    :param match_map: Map of match operations
+    :param action_name: Action name
+    :param action_params: Map of action parameters
+    :param metadata: table metadata
+    :param priority: entry priority
+    :param cnt_pkt: packet count
+    :param cnt_byte: byte count
+    :return: inserted entry
+    """
+    assert match_map, "Table entry without match operations is not accepted"
+    assert action_name, "Table entry without action is not accepted"
+
+    raise NotImplementedError(
+        "Range-based table insertion not implemented yet")
+
+
+def insert_table_entry_optional(
+        table_name, match_map, action_name, action_params, metadata,
+        priority, cnt_pkt=-1, cnt_byte=-1):  # pylint: disable=unused-argument
+    """
+    Insert an entry into an optional match table.
+
+    :param table_name: P4 table name
+    :param match_map: Map of match operations
+    :param action_name: Action name
+    :param action_params: Map of action parameters
+    :param metadata: table metadata
+    :param priority: entry priority
+    :param cnt_pkt: packet count
+    :param cnt_byte: byte count
+    :return: inserted entry
+    """
+    assert match_map, "Table entry without match operations is not accepted"
+    assert action_name, "Table entry without action is not accepted"
+
+    raise NotImplementedError(
+        "Optional-based table insertion not implemented yet")
+
+
+class P4Manager:
+    """
+    Class to manage the runtime entries of a P4 pipeline.
+    """
+
+    def __init__(self, device_id: int, ip_address: str, port: int,
+                 election_id: tuple, role_name=None, ssl_options=None):
+        global CLIENT
+
+        self.__id = device_id
+        self.__ip_address = ip_address
+        self.__port = int(port)
+        self.__endpoint = f"{self.__ip_address}:{self.__port}"
+        CLIENT = P4RuntimeClient(
+            self.__id, self.__endpoint, election_id, role_name, ssl_options)
+        self.__p4info = None
+
+        # Internal memory for whitebox management
+        # | -> P4 entities
+        self.p4_objects = {}
+
+        # | -> P4 entities
+        self.table_entries = {}
+        self.counter_entries = {}
+        self.direct_counter_entries = {}
+        self.meter_entries = {}
+        self.direct_meter_entries = {}
+        self.multicast_groups = {}
+        self.clone_session_entries = {}
+        self.action_profile_members = {}
+        self.action_profile_groups = {}
+
+    def start(self, p4bin_path, p4info_path):
+        """
+        Start the P4 manager. This involves:
+        (i) setting the forwarding pipeline of the target switch,
+        (ii) creating a P4 context object,
+        (iii) Discovering all the entities of the pipeline, and
+        (iv) initializing necessary data structures of the manager
+
+        :param p4bin_path: Path to the P4 binary file
+        :param p4info_path: Path to the P4 info file
+        :return: void
+        """
+
+        if not p4bin_path or not os.path.exists(p4bin_path):
+            LOGGER.warning("P4 binary file not found")
+
+        if not p4info_path or not os.path.exists(p4info_path):
+            LOGGER.warning("P4 info file not found")
+
+        # Forwarding pipeline is only set iff both files are present
+        if p4bin_path and p4info_path:
+            try:
+                CLIENT.set_fwd_pipe_config(p4info_path, p4bin_path)
+            except FileNotFoundError as ex:
+                LOGGER.critical(ex)
+                CLIENT.tear_down()
+                raise FileNotFoundError(ex) from ex
+            except P4RuntimeException as ex:
+                LOGGER.critical("Error when setting config")
+                LOGGER.critical(ex)
+                CLIENT.tear_down()
+                raise P4RuntimeException(ex) from ex
+            except Exception as ex:  # pylint: disable=broad-except
+                LOGGER.critical("Error when setting config")
+                CLIENT.tear_down()
+                raise Exception(ex) from ex
+
+        try:
+            self.__p4info = CLIENT.get_p4info()
+        except P4RuntimeException as ex:
+            LOGGER.critical("Error when retrieving P4Info")
+            LOGGER.critical(ex)
+            CLIENT.tear_down()
+            raise P4RuntimeException(ex) from ex
+
+        CONTEXT.set_p4info(self.__p4info)
+        self.__discover_objects()
+        self.__init_objects()
+        LOGGER.info("P4Runtime manager started")
+
+    def stop(self):
+        """
+        Stop the P4 manager. This involves:
+        (i) tearing the P4Runtime client down and
+        (ii) cleaning up the manager's internal memory
+
+        :return: void
+        """
+        global CLIENT
+
+        # gRPC client must already be instantiated
+        assert CLIENT
+
+        # Trigger connection tear down with the P4Runtime server
+        CLIENT.tear_down()
+        CLIENT = None
+        self.__clear()
+        LOGGER.info("P4Runtime manager stopped")
+
+    def __clear(self):
+        """
+        Reset basic members of the P4 manager.
+
+        :return: void
+        """
+        self.__id = None
+        self.__ip_address = None
+        self.__port = None
+        self.__endpoint = None
+        self.__clear_state()
+
+    def __clear_state(self):
+        """
+        Reset the manager's internal memory.
+
+        :return: void
+        """
+        self.table_entries.clear()
+        self.counter_entries.clear()
+        self.direct_counter_entries.clear()
+        self.meter_entries.clear()
+        self.direct_meter_entries.clear()
+        self.multicast_groups.clear()
+        self.clone_session_entries.clear()
+        self.action_profile_members.clear()
+        self.action_profile_groups.clear()
+        self.p4_objects.clear()
+
+    def __init_objects(self):
+        """
+        Parse the discovered P4 objects and initialize internal memory for all
+        the underlying P4 entities.
+
+        :return: void
+        """
+        global KEY_TABLE, KEY_ACTION, KEY_ACTION_PROFILE, \
+            KEY_COUNTER, KEY_DIR_COUNTER, \
+            KEY_METER, KEY_DIR_METER, \
+            KEY_CTL_PKT_METADATA
+
+        KEY_TABLE = P4Type.table.name
+        KEY_ACTION = P4Type.action.name
+        KEY_ACTION_PROFILE = P4Type.action_profile.name
+        KEY_COUNTER = P4Type.counter.name
+        KEY_DIR_COUNTER = P4Type.direct_counter.name
+        KEY_METER = P4Type.meter.name
+        KEY_DIR_METER = P4Type.direct_meter.name
+        KEY_CTL_PKT_METADATA = P4Type.controller_packet_metadata.name
+        assert (k for k in [
+            KEY_TABLE, KEY_ACTION, KEY_ACTION_PROFILE,
+            KEY_COUNTER, KEY_DIR_COUNTER,
+            KEY_METER, KEY_DIR_METER,
+            KEY_CTL_PKT_METADATA
+        ])
+
+        if not self.p4_objects:
+            LOGGER.warning(
+                "Cannot initialize internal memory without discovering "
+                "the pipeline\'s P4 objects")
+            return
+
+        # Initialize all sorts of entries
+        if KEY_TABLE in self.p4_objects:
+            for table in self.p4_objects[KEY_TABLE]:
+                self.table_entries[table.name] = []
+
+        if KEY_COUNTER in self.p4_objects:
+            for cnt in self.p4_objects[KEY_COUNTER]:
+                self.counter_entries[cnt.name] = []
+
+        if KEY_DIR_COUNTER in self.p4_objects:
+            for d_cnt in self.p4_objects[KEY_DIR_COUNTER]:
+                self.direct_counter_entries[d_cnt.name] = []
+
+        if KEY_METER in self.p4_objects:
+            for meter in self.p4_objects[KEY_METER]:
+                self.meter_entries[meter.name] = []
+
+        if KEY_DIR_METER in self.p4_objects:
+            for d_meter in self.p4_objects[KEY_DIR_METER]:
+                self.direct_meter_entries[d_meter.name] = []
+
+        if KEY_ACTION_PROFILE in self.p4_objects:
+            for act_prof in self.p4_objects[KEY_ACTION_PROFILE]:
+                self.action_profile_members[act_prof.name] = []
+                self.action_profile_groups[act_prof.name] = []
+
+    def __discover_objects(self):
+        """
+        Discover and store all P4 objects.
+
+        :return: void
+        """
+        self.__clear_state()
+
+        for obj_type in P4Type:
+            for obj in P4Objects(obj_type):
+                if obj_type.name not in self.p4_objects:
+                    self.p4_objects[obj_type.name] = []
+                self.p4_objects[obj_type.name].append(obj)
+
+    def get_table(self, table_name):
+        """
+        Get a P4 table by name.
+
+        :param table_name: P4 table name
+        :return: P4 table object
+        """
+        if KEY_TABLE not in self.p4_objects:
+            return None
+        for table in self.p4_objects[KEY_TABLE]:
+            if table.name == table_name:
+                return table
+        return None
+
+    def get_tables(self):
+        """
+        Get a list of all P4 tables.
+
+        :return: list of P4 tables or empty list
+        """
+        if KEY_TABLE not in self.p4_objects:
+            return []
+        return self.p4_objects[KEY_TABLE]
+
+    def get_action(self, action_name):
+        """
+        Get action by name.
+
+        :param action_name: name of a P4 action
+        :return: action object or None
+        """
+        if KEY_ACTION not in self.p4_objects:
+            return None
+        for action in self.p4_objects[KEY_ACTION]:
+            if action.name == action_name:
+                return action
+        return None
+
+    def get_actions(self):
+        """
+        Get a list of all P4 actions.
+
+        :return: list of P4 actions or empty list
+        """
+        if KEY_ACTION not in self.p4_objects:
+            return []
+        return self.p4_objects[KEY_ACTION]
+
+    def get_action_profile(self, action_prof_name):
+        """
+        Get action profile by name.
+
+        :param action_prof_name: name of the action profile
+        :return: action profile object or None
+        """
+        if KEY_ACTION_PROFILE not in self.p4_objects:
+            return None
+        for action_prof in self.p4_objects[KEY_ACTION_PROFILE]:
+            if action_prof.name == action_prof_name:
+                return action_prof
+        return None
+
+    def get_action_profiles(self):
+        """
+        Get a list of all P4 action profiles.
+
+        :return: list of P4 action profiles or empty list
+        """
+        if KEY_ACTION_PROFILE not in self.p4_objects:
+            return []
+        return self.p4_objects[KEY_ACTION_PROFILE]
+
+    def get_counter(self, cnt_name):
+        """
+        Get counter by name.
+
+        :param cnt_name: name of a P4 counter
+        :return: counter object or None
+        """
+        if KEY_COUNTER not in self.p4_objects:
+            return None
+        for cnt in self.p4_objects[KEY_COUNTER]:
+            if cnt.name == cnt_name:
+                return cnt
+        return None
+
+    def get_counters(self):
+        """
+        Get a list of all P4 counters.
+
+        :return: list of P4 counters or empty list
+        """
+        if KEY_COUNTER not in self.p4_objects:
+            return []
+        return self.p4_objects[KEY_COUNTER]
+
+    def get_direct_counter(self, dir_cnt_name):
+        """
+        Get direct counter by name.
+
+        :param dir_cnt_name: name of a direct P4 counter
+        :return: direct counter object or None
+        """
+        if KEY_DIR_COUNTER not in self.p4_objects:
+            return None
+        for d_cnt in self.p4_objects[KEY_DIR_COUNTER]:
+            if d_cnt.name == dir_cnt_name:
+                return d_cnt
+        return None
+
+    def get_direct_counters(self):
+        """
+        Get a list of all direct P4 counters.
+
+        :return: list of direct P4 counters or empty list
+        """
+        if KEY_DIR_COUNTER not in self.p4_objects:
+            return []
+        return self.p4_objects[KEY_DIR_COUNTER]
+
+    def get_meter(self, meter_name):
+        """
+        Get meter by name.
+
+        :param meter_name: name of a P4 meter
+        :return: meter object or None
+        """
+        if KEY_METER not in self.p4_objects:
+            return None
+        for meter in self.p4_objects[KEY_METER]:
+            if meter.name == meter_name:
+                return meter
+        return None
+
+    def get_meters(self):
+        """
+        Get a list of all P4 meters.
+
+        :return: list of P4 meters or empty list
+        """
+        if KEY_METER not in self.p4_objects:
+            return []
+        return self.p4_objects[KEY_METER]
+
+    def get_direct_meter(self, dir_meter_name):
+        """
+        Get direct meter by name.
+
+        :param dir_meter_name: name of a direct P4 meter
+        :return: direct meter object or None
+        """
+        if KEY_DIR_METER not in self.p4_objects:
+            return None
+        for d_meter in self.p4_objects[KEY_DIR_METER]:
+            if d_meter.name == dir_meter_name:
+                return d_meter
+        return None
+
+    def get_direct_meters(self):
+        """
+        Get a list of all direct P4 meters.
+
+        :return: list of direct P4 meters or empty list
+        """
+        if KEY_DIR_METER not in self.p4_objects:
+            return []
+        return self.p4_objects[KEY_DIR_METER]
+
+    def get_ctl_pkt_metadata(self, ctl_pkt_meta_name):
+        """
+        Get a packet replication object by name.
+
+        :param ctl_pkt_meta_name: name of a P4 packet replication object
+        :return: P4 packet replication object or None
+        """
+        if KEY_CTL_PKT_METADATA not in self.p4_objects:
+            return None
+        for pkt_meta in self.p4_objects[KEY_CTL_PKT_METADATA]:
+            if ctl_pkt_meta_name == pkt_meta.name:
+                return pkt_meta
+        return None
+
+    def get_resource_keys(self):
+        """
+        Retrieve the available P4 resource keys.
+
+        :return: list of P4 resource keys
+        """
+        return list(self.p4_objects.keys())
+
+    def count_active_entries(self):
+        """
+        Count the number of active entries across all supported P4 entities.
+
+        :return: active number of entries
+        """
+        tot_cnt = \
+            self.count_table_entries_all() + \
+            self.count_counter_entries_all() + \
+            self.count_direct_counter_entries_all() + \
+            self.count_meter_entries_all() + \
+            self.count_direct_meter_entries_all() + \
+            self.count_action_prof_member_entries_all() + \
+            self.count_action_prof_group_entries_all()
+
+        return tot_cnt
+
+    ############################################################################
+    # Table methods
+    ############################################################################
+    def get_table_names(self):
+        """
+        Retrieve a list of P4 table names.
+
+        :return: list of P4 table names
+        """
+        if KEY_TABLE not in self.p4_objects:
+            return []
+        return list(table.name for table in self.p4_objects[KEY_TABLE])
+
+    def get_table_entries(self, table_name, action_name=None):
+        """
+        Get a list of P4 table entries by table name and optionally by action.
+
+        :param table_name: name of a P4 table
+        :param action_name: action name
+        :return: list of P4 table entries or None
+        """
+        if table_name not in self.table_entries:
+            return None
+        self.table_entries[table_name].clear()
+        self.table_entries[table_name] = []
+
+        try:
+            for count, table_entry in enumerate(
+                    TableEntry(table_name)(action=action_name).read()):
+                LOGGER.debug(
+                    "Table %s - Entry %d\n%s", table_name, count, table_entry)
+                self.table_entries[table_name].append(table_entry)
+            return self.table_entries[table_name]
+        except P4RuntimeException as ex:
+            LOGGER.error(ex)
+            return []
+
+    def table_entries_to_json(self, table_name):
+        """
+        Encode all entries of a P4 table into a JSON object.
+
+        :param table_name: name of a P4 table
+        :return: JSON object with table entries
+        """
+        if (KEY_TABLE not in self.p4_objects) or \
+                not self.p4_objects[KEY_TABLE]:
+            LOGGER.warning("No table entries to retrieve\n")
+            return {}
+
+        table_res = {}
+
+        for table in self.p4_objects[KEY_TABLE]:
+            if not table.name == table_name:
+                continue
+
+            entries = self.get_table_entries(table.name)
+            if len(entries) == 0:
+                continue
+
+            table_res["table-name"] = table_name
+
+            for ent in entries:
+                entry_match_field = "\n".join(ent.match.fields())
+                entry_match_type = match_type_to_str(
+                    ent.match.match_type(entry_match_field))
+
+                table_res["id"] = ent.id
+                table_res["match-fields"] = []
+                for match_field in ent.match.fields():
+                    table_res["match-fields"].append(
+                        {
+                            "match-field": match_field,
+                            "match-value": ent.match.value(match_field),
+                            "match-type": entry_match_type
+                        }
+                    )
+                table_res["actions"] = []
+                table_res["actions"].append(
+                    {
+                        "action-id": ent.action.id(),
+                        "action": ent.action.alias()
+                    }
+                )
+                table_res["priority"] = ent.priority
+                table_res["is-default"] = ent.is_default
+                table_res["idle-timeout"] = ent.idle_timeout_ns
+                if ent.metadata:
+                    table_res["metadata"] = ent.metadata
+
+        return table_res
+
+    def count_table_entries(self, table_name, action_name=None):
+        """
+        Count the number of entries in a P4 table.
+
+        :param table_name: name of a P4 table
+        :param action_name: action name
+        :return: number of P4 table entries or negative integer
+        upon missing table
+        """
+        entries = self.get_table_entries(table_name, action_name)
+        if entries is None:
+            return -1
+        return len(entries)
+
+    def count_table_entries_all(self):
+        """
+        Count all entries in a P4 table.
+
+        :return: number of P4 table entries
+        """
+        total_cnt = 0
+        for table_name in self.get_table_names():
+            cnt = self.count_table_entries(table_name)
+            if cnt < 0:
+                continue
+            total_cnt += cnt
+        return total_cnt
+
+    def table_entry_operation_from_json(
+            self, json_resource, operation: WriteOperation):
+        """
+        Parse a JSON-based table entry and insert/update/delete it
+        into/from the switch.
+
+        :param json_resource: JSON-based table entry
+        :param operation: Write operation (i.e., insert, modify, delete)
+        to perform.
+        :return: inserted entry or None in case of parsing error
+        """
+
+        table_name = parse_resource_string_from_json(
+            json_resource, "table-name")
+        match_map = parse_match_operations_from_json(json_resource)
+        action_name = parse_resource_string_from_json(
+            json_resource, "action-name")
+        action_params = parse_action_parameters_from_json(json_resource)
+        priority = parse_resource_integer_from_json(json_resource, "priority")
+        metadata = parse_resource_bytes_from_json(json_resource, "metadata")
+
+        if operation in [WriteOperation.insert, WriteOperation.update]:
+            LOGGER.debug("Table entry to insert/update: %s", json_resource)
+            return self.insert_table_entry(
+                table_name=table_name,
+                match_map=match_map,
+                action_name=action_name,
+                action_params=action_params,
+                priority=priority,
+                metadata=metadata if metadata else None
+            )
+        if operation == WriteOperation.delete:
+            LOGGER.debug("Table entry to delete: %s", json_resource)
+            return self.delete_table_entry(
+                table_name=table_name,
+                match_map=match_map,
+                action_name=action_name,
+                action_params=action_params,
+                priority=priority
+            )
+        return None
+
+    def insert_table_entry(self, table_name,
+                           match_map, action_name, action_params,
+                           priority, metadata=None, cnt_pkt=-1, cnt_byte=-1):
+        """
+        Insert an entry into a P4 table.
+        This method has internal logic to discriminate among:
+        (i) Exact matches,
+        (ii) Ternary matches,
+        (iii) LPM matches,
+        (iv) Range matches, and
+        (v) Optional matches
+
+        :param table_name: name of a P4 table
+        :param match_map: map of match operations
+        :param action_name: action name
+        :param action_params: map of action parameters
+        :param priority: entry priority
+        :param metadata: entry metadata
+        :param cnt_pkt: packet count
+        :param cnt_byte: byte count
+        :return: inserted entry
+        """
+        table = self.get_table(table_name)
+        assert table, \
+            "P4 pipeline does not implement table " + table_name
+
+        if not get_table_type(table):
+            msg = f"Table {table_name} is undefined, cannot insert entry"
+            LOGGER.error(msg)
+            raise UserError(msg)
+
+        # Exact match is supported
+        if get_table_type(table) == p4info_pb2.MatchField.EXACT:
+            if priority != 0:
+                msg = f"Table {table_name} is non-ternary, priority must be 0"
+                LOGGER.error(msg)
+                raise UserError(msg)
+            return insert_table_entry_exact(
+                table_name, match_map, action_name, action_params, metadata,
+                cnt_pkt, cnt_byte)
+
+        # Ternary and LPM matches are supported
+        if get_table_type(table) in \
+                [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]:
+            if priority == 0:
+                msg = f"Table {table_name} is ternary, priority must be != 0"
+                LOGGER.error(msg)
+                raise UserError(msg)
+            return insert_table_entry_ternary(
+                table_name, match_map, action_name, action_params, metadata,
+                priority, cnt_pkt, cnt_byte)
+
+        # TODO: Cover RANGE match  # pylint: disable=W0511
+        if get_table_type(table) == p4info_pb2.MatchField.RANGE:
+            return insert_table_entry_range(
+                table_name, match_map, action_name, action_params, metadata,
+                priority, cnt_pkt, cnt_byte)
+
+        # TODO: Cover OPTIONAL match  # pylint: disable=W0511
+        if get_table_type(table) == p4info_pb2.MatchField.OPTIONAL:
+            return insert_table_entry_optional(
+                table_name, match_map, action_name, action_params, metadata,
+                priority, cnt_pkt, cnt_byte)
+
+        return None
+
+    def delete_table_entry(self, table_name,
+                           match_map, action_name, action_params, priority=0):
+        """
+        Delete an entry from a P4 table.
+
+        :param table_name: name of a P4 table
+        :param match_map: map of match operations
+        :param action_name: action name
+        :param action_params: map of action parameters
+        :param priority: entry priority
+        :return: deleted entry
+        """
+        table = self.get_table(table_name)
+        assert table, \
+            "P4 pipeline does not implement table " + table_name
+
+        if not get_table_type(table):
+            msg = f"Table {table_name} is undefined, cannot delete entry"
+            LOGGER.error(msg)
+            raise UserError(msg)
+
+        table_entry = TableEntry(table_name)(action=action_name)
+
+        for match_k, match_v in match_map.items():
+            table_entry.match[match_k] = match_v
+
+        for action_k, action_v in action_params.items():
+            table_entry.action[action_k] = action_v
+
+        if get_table_type(table) == p4info_pb2.MatchField.EXACT:
+            if priority != 0:
+                msg = f"Table {table_name} is non-ternary, priority must be 0"
+                LOGGER.error(msg)
+                raise UserError(msg)
+
+        if get_table_type(table) in \
+                [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]:
+            if priority == 0:
+                msg = f"Table {table_name} is ternary, priority must be != 0"
+                LOGGER.error(msg)
+                raise UserError(msg)
+
+        # TODO: Ensure correctness of RANGE & OPTIONAL  # pylint: disable=W0511
+        if get_table_type(table) in \
+                [p4info_pb2.MatchField.RANGE, p4info_pb2.MatchField.OPTIONAL]:
+            raise NotImplementedError(
+                "Range and optional-based table deletion not implemented yet")
+
+        table_entry.priority = priority
+
+        table_entry.delete()
+        LOGGER.info("Deleted entry %s from table: %s", table_entry, table_name)
+
+        return table_entry
+
+    def delete_table_entries(self, table_name):
+        """
+        Delete all entries of a P4 table.
+
+        :param table_name: name of a P4 table
+        :return: void
+        """
+        table = self.get_table(table_name)
+        assert table, \
+            "P4 pipeline does not implement table " + table_name
+
+        if not get_table_type(table):
+            msg = f"Table {table_name} is undefined, cannot delete entry"
+            LOGGER.error(msg)
+            raise UserError(msg)
+
+        TableEntry(table_name).read(function=lambda x: x.delete())
+        LOGGER.info("Deleted all entries from table: %s", table_name)
+
+    def print_table_entries_spec(self, table_name):
+        """
+        Print the specification of a P4 table.
+        Specification covers:
+        (i) match id,
+        (ii) match field name (e.g., ip_proto),
+        (iii) match type (e.g., exact, ternary, etc.),
+        (iv) match bitwidth
+        (v) action id, and
+        (vi) action name
+
+        :param table_name: name of a P4 table
+        :return: void
+        """
+        if (KEY_TABLE not in self.p4_objects) or \
+                not self.p4_objects[KEY_TABLE]:
+            LOGGER.warning("No table specification to print\n")
+            return
+
+        for table in self.p4_objects[KEY_TABLE]:
+            if not table.name == table_name:
+                continue
+
+            entry = []
+
+            for i, match_field in enumerate(table.match_fields):
+                table_name = table.name if i == 0 else ""
+                match_field_id = match_field.id
+                match_field_name = match_field.name
+                match_type_str = match_type_to_str(match_field.match_type)
+                match_field_bitwidth = match_field.bitwidth
+
+                entry.append(
+                    [
+                        table_name, str(match_field_id), match_field_name,
+                        match_type_str, str(match_field_bitwidth)
+                    ]
+                )
+
+            print(
+                tabulate(
+                    entry,
+                    headers=[
+                        KEY_TABLE, "match id", "match field",
+                        "match type", "match width"
+                    ],
+                    stralign="right",
+                    tablefmt="pretty"
+                )
+            )
+
+            entry.clear()
+
+            for i, action in enumerate(table.action_refs):
+                table_name = table.name if i == 0 else ""
+                action_id = action.id
+                action_name = CONTEXT.get_name_from_id(action.id)
+                entry.append([table_name, str(action_id), action_name])
+
+            print(
+                tabulate(
+                    entry,
+                    headers=[KEY_TABLE, "action id", "action name"],
+                    stralign="right",
+                    tablefmt="pretty"
+                )
+            )
+            print("\n")
+            entry.clear()
+
+    def print_table_entries_summary(self):
+        """
+        Print a summary of a P4 table state.
+        Summary covers:
+        (i) table name,
+        (ii) number of entries in the table, and
+        (iii) a string of \n-separated entry IDs.
+
+        :return: void
+        """
+        if (KEY_TABLE not in self.p4_objects) or \
+                not self.p4_objects[KEY_TABLE]:
+            LOGGER.warning("No tables to print\n")
+            return
+
+        entry = []
+
+        for table in self.p4_objects[KEY_TABLE]:
+            table_name = table.name
+            entries = self.get_table_entries(table_name)
+            entries_nb = len(entries)
+            entry_ids_str = "\n".join(str(e.id) for e in entries) \
+                if entries_nb > 0 else "-"
+
+            entry.append([table_name, entries_nb, entry_ids_str])
+
+        print(
+            tabulate(
+                entry,
+                headers=[KEY_TABLE, "# of entries", "entry ids"],
+                stralign="right",
+                tablefmt="pretty"
+            )
+        )
+        print("\n")
+
+    def print_table_entries(self, table_name):
+        """
+        Print all entries of a P4 table.
+
+        :param table_name: name of a P4 table
+        :return: void
+        """
+        if (KEY_TABLE not in self.p4_objects) or \
+                not self.p4_objects[KEY_TABLE]:
+            LOGGER.warning("No table entries to print\n")
+            return
+
+        for table in self.p4_objects[KEY_TABLE]:
+            if not table.name == table_name:
+                continue
+
+            entry = []
+
+            entries = self.get_table_entries(table.name)
+            for ent in entries:
+                entry_id = ent.id
+                mfs = ent.match.fields()
+                entry_match_field = "\n".join(mfs)
+                entry_match_value = "\n".join(
+                    ent.match.value(match_field) for match_field in mfs
+                )
+                entry_match_type = match_type_to_str(
+                    ent.match.match_type(entry_match_field))
+                entry_action_id = ent.action.id()
+                entry_action = ent.action.alias()
+                entry_priority = ent.priority
+                entry_is_default = ent.is_default
+                entry_idle_timeout_ns = ent.idle_timeout_ns
+                entry_metadata = ent.metadata
+
+                entry.append(
+                    [
+                        table_name, str(entry_id),
+                        entry_match_field, entry_match_value, entry_match_type,
+                        str(entry_action_id), entry_action,
+                        str(entry_priority), str(entry_is_default),
+                        str(entry_idle_timeout_ns), str(entry_metadata)
+                    ]
+                )
+
+            if not entry:
+                entry.append([table_name] + ["-"] * 10)
+
+            print(
+                tabulate(
+                    entry,
+                    headers=[
+                        KEY_TABLE, "table id",
+                        "match field", "match value", "match type",
+                        "action id", "action", "priority", "is default",
+                        "idle timeout (ns)", "metadata"
+                    ],
+                    stralign="right",
+                    tablefmt="pretty",
+                )
+            )
+            print("\n")
+
+    ############################################################################
+
+    ############################################################################
+    # Counter methods
+    ############################################################################
+    def get_counter_names(self):
+        """
+        Retrieve a list of P4 counter names.
+
+        :return: list of P4 counter names
+        """
+        if KEY_COUNTER not in self.p4_objects:
+            return []
+        return list(cnt.name for cnt in self.p4_objects[KEY_COUNTER])
+
+    def get_counter_entries(self, cnt_name):
+        """
+        Get a list of P4 counters by name.
+
+        :param cnt_name: name of a P4 counter
+        :return: list of P4 counters or None
+        """
+        if cnt_name not in self.counter_entries:
+            return None
+        self.counter_entries[cnt_name].clear()
+        self.counter_entries[cnt_name] = []
+
+        try:
+            for count, cnt_entry in enumerate(CounterEntry(cnt_name).read()):
+                LOGGER.debug(
+                    "Counter %s - Entry %d\n%s", cnt_name, count, cnt_entry)
+                self.counter_entries[cnt_name].append(cnt_entry)
+            return self.counter_entries[cnt_name]
+        except P4RuntimeException as ex:
+            LOGGER.error(ex)
+            return []
+
+    def counter_entries_to_json(self, cnt_name):
+        """
+        Encode all counter entries into a JSON object.
+
+        :param cnt_name: counter name
+        :return: JSON object with counter entries
+        """
+        if (KEY_COUNTER not in self.p4_objects) or \
+                not self.p4_objects[KEY_COUNTER]:
+            LOGGER.warning("No counter entries to retrieve\n")
+            return {}
+
+        cnt_res = {}
+
+        for cnt in self.p4_objects[KEY_COUNTER]:
+            if not cnt.name == cnt_name:
+                continue
+
+            entries = self.get_counter_entries(cnt.name)
+            if len(entries) == 0:
+                continue
+
+            cnt_res["counter-name"] = cnt_name
+
+            for ent in entries:
+                cnt_res["index"] = ent.index
+                cnt_res["packet-count"] = ent.packet_count
+                cnt_res["byte-count"] = ent.byte_count
+
+        return cnt_res
+
+    def count_counter_entries(self, cnt_name):
+        """
+        Count the number of P4 counter entries by counter name.
+
+        :param cnt_name: name of a P4 counter
+        :return: number of P4 counters or negative integer
+        upon missing counter
+        """
+        entries = self.get_counter_entries(cnt_name)
+        if entries is None:
+            return -1
+        return len(entries)
+
+    def count_counter_entries_all(self):
+        """
+        Count all entries of a P4 counter.
+
+        :return: number of P4 counter entries
+        """
+        total_cnt = 0
+        for cnt_name in self.get_counter_names():
+            cnt = self.count_counter_entries(cnt_name)
+            if cnt < 0:
+                continue
+            total_cnt += cnt
+        return total_cnt
+
+    def counter_entry_operation_from_json(self,
+                                          json_resource,
+                                          operation: WriteOperation):
+        """
+        Parse a JSON-based counter entry and insert/update/delete it
+        into/from the switch.
+
+        :param json_resource: JSON-based counter entry
+        :param operation: Write operation (i.e., insert, modify, delete)
+        to perform.
+        :return: inserted entry or None in case of parsing error
+        """
+        cnt_name = parse_resource_string_from_json(
+            json_resource, "counter-name")
+
+        if operation in [WriteOperation.insert, WriteOperation.update]:
+            index = parse_resource_integer_from_json(
+                json_resource, "index")
+            cnt_pkt = parse_resource_integer_from_json(
+                json_resource, "packet-count")
+            cnt_byte = parse_resource_integer_from_json(
+                json_resource, "byte-count")
+
+            LOGGER.debug("Counter entry to insert/update: %s", json_resource)
+            return self.insert_counter_entry(
+                cnt_name=cnt_name,
+                index=index,
+                cnt_pkt=cnt_pkt,
+                cnt_byte=cnt_byte
+            )
+        if operation == WriteOperation.delete:
+            LOGGER.debug("Counter entry to delete: %s", json_resource)
+            return self.clear_counter_entry(
+                cnt_name=cnt_name
+            )
+        return None
+
+    def insert_counter_entry(self, cnt_name, index=None,
+                             cnt_pkt=-1, cnt_byte=-1):
+        """
+        Insert a P4 counter entry.
+
+        :param cnt_name: name of a P4 counter
+        :param index: counter index
+        :param cnt_pkt: packet count
+        :param cnt_byte: byte count
+        :return: inserted entry
+        """
+        cnt = self.get_counter(cnt_name)
+        assert cnt, \
+            "P4 pipeline does not implement counter " + cnt_name
+
+        cnt_entry = CounterEntry(cnt_name)
+
+        if index:
+            cnt_entry.index = index
+
+        if cnt_pkt > 0:
+            cnt_entry.packet_count = cnt_pkt
+
+        if cnt_byte > 0:
+            cnt_entry.byte_count = cnt_byte
+
+        cnt_entry.modify()
+        LOGGER.info("Updated counter entry: %s", cnt_entry)
+
+        return cnt_entry
+
+    def clear_counter_entry(self, cnt_name):
+        """
+        Clear the counters of a counter entry by name.
+
+        :param cnt_name: name of a P4 counter
+        :return: cleared entry
+        """
+        cnt = self.get_counter(cnt_name)
+        assert cnt, \
+            "P4 pipeline does not implement counter " + cnt_name
+
+        cnt_entry = CounterEntry(cnt_name)
+        cnt_entry.clear_data()
+        LOGGER.info("Cleared data of counter entry: %s", cnt_entry)
+
+        return cnt_entry
+
+    def print_counter_entries_summary(self):
+        """
+        Print a summary of a P4 counter state.
+        Summary covers:
+        (i) counter name,
+        (ii) number of entries in the table, and
+        (iii) a string of \n-separated entry IDs.
+
+        :return: void
+        """
+        if (KEY_COUNTER not in self.p4_objects) or \
+                not self.p4_objects[KEY_COUNTER]:
+            LOGGER.warning("No counters to print\n")
+            return
+
+        entry = []
+
+        for cnt in self.p4_objects[KEY_COUNTER]:
+            entries = self.get_counter_entries(cnt.name)
+            entries_nb = len(entries)
+            entry_ids_str = ",".join(str(e.id) for e in entries) \
+                if entries_nb > 0 else "-"
+            entry.append([cnt.name, str(entries_nb), entry_ids_str])
+
+        print(
+            tabulate(
+                entry,
+                headers=[KEY_COUNTER, "# of entries", "entry ids"],
+                stralign="right",
+                tablefmt="pretty"
+            )
+        )
+        print("\n")
+
+    ############################################################################
+
+    ############################################################################
+    # Direct counter methods
+    ############################################################################
+    def get_direct_counter_names(self):
+        """
+        Retrieve a list of direct P4 counter names.
+
+        :return: list of direct P4 counter names
+        """
+        if KEY_DIR_COUNTER not in self.p4_objects:
+            return []
+        return list(d_cnt.name for d_cnt in self.p4_objects[KEY_DIR_COUNTER])
+
+    def get_direct_counter_entries(self, d_cnt_name):
+        """
+        Get a list of direct P4 counters by name.
+
+        :param d_cnt_name: name of a direct P4 counter
+        :return: list of direct P4 counters or None
+        """
+        if d_cnt_name not in self.direct_counter_entries:
+            return None
+        self.direct_counter_entries[d_cnt_name].clear()
+        self.direct_counter_entries[d_cnt_name] = []
+
+        try:
+            for count, d_cnt_entry in enumerate(
+                    DirectCounterEntry(d_cnt_name).read()):
+                LOGGER.debug(
+                    "Direct counter %s - Entry %d\n%s",
+                    d_cnt_name, count, d_cnt_entry)
+                self.direct_counter_entries[d_cnt_name].append(d_cnt_entry)
+            return self.direct_counter_entries[d_cnt_name]
+        except P4RuntimeException as ex:
+            LOGGER.error("Failed to get direct counter %s entries: %s",
+                         d_cnt_name, str(ex))
+            return []
+
+    def direct_counter_entries_to_json(self, d_cnt_name):
+        """
+        Encode all direct counter entries into a JSON object.
+
+        :param d_cnt_name: direct counter name
+        :return: JSON object with direct counter entries
+        """
+        if (KEY_DIR_COUNTER not in self.p4_objects) or \
+                not self.p4_objects[KEY_DIR_COUNTER]:
+            LOGGER.warning("No direct counter entries to retrieve\n")
+            return {}
+
+        d_cnt_res = {}
+
+        for d_cnt in self.p4_objects[KEY_DIR_COUNTER]:
+            if not d_cnt.name == d_cnt_name:
+                continue
+
+            entries = self.get_direct_counter_entries(d_cnt.name)
+            if len(entries) == 0:
+                continue
+
+            d_cnt_res["direct-counter-name"] = d_cnt_name
+
+            for ent in entries:
+                d_cnt_res["match-fields"] = []
+                for k, v in ent.table_entry.match.items():
+                    d_cnt_res["match-fields"].append(
+                        {
+                            "match-field": k,
+                            "match-value": v
+                        }
+                    )
+                d_cnt_res["priority"] = ent.priority
+                d_cnt_res["packet-count"] = ent.packet_count
+                d_cnt_res["byte-count"] = ent.byte_count
+
+        return d_cnt_res
+
+    def count_direct_counter_entries(self, d_cnt_name):
+        """
+        Count the number of direct P4 counter entries by counter name.
+
+        :param d_cnt_name: name of a direct P4 counter
+        :return: number of direct P4 counters or negative integer
+        upon missing direct counter
+        """
+        entries = self.get_direct_counter_entries(d_cnt_name)
+        if entries is None:
+            return -1
+        return len(entries)
+
+    def count_direct_counter_entries_all(self):
+        """
+        Count all entries of a direct P4 counter.
+
+        :return: number of direct P4 counter entries
+        """
+        total_cnt = 0
+        for d_cnt_name in self.get_direct_counter_names():
+            cnt = self.count_direct_counter_entries(d_cnt_name)
+            if cnt < 0:
+                continue
+            total_cnt += cnt
+        return total_cnt
+
+    def direct_counter_entry_operation_from_json(self,
+                                                 json_resource,
+                                                 operation: WriteOperation):
+        """
+        Parse a JSON-based direct counter entry and insert/update/delete it
+        into/from the switch.
+
+        :param json_resource: JSON-based direct counter entry
+        :param operation: Write operation (i.e., insert, modify, delete)
+        to perform.
+        :return: inserted entry or None in case of parsing error
+        """
+        d_cnt_name = parse_resource_string_from_json(
+            json_resource, "direct-counter-name")
+
+        if operation in [WriteOperation.insert, WriteOperation.update]:
+            match_map = parse_match_operations_from_json(json_resource)
+            priority = parse_resource_integer_from_json(
+                json_resource, "priority")
+            cnt_pkt = parse_resource_integer_from_json(
+                json_resource, "packet-count")
+            cnt_byte = parse_resource_integer_from_json(
+                json_resource, "byte-count")
+
+            LOGGER.debug(
+                "Direct counter entry to insert/update: %s", json_resource)
+            return self.insert_direct_counter_entry(
+                d_cnt_name=d_cnt_name,
+                match_map=match_map,
+                priority=priority,
+                cnt_pkt=cnt_pkt,
+                cnt_byte=cnt_byte
+            )
+        if operation == WriteOperation.delete:
+            LOGGER.debug("Direct counter entry to delete: %s", json_resource)
+            return self.clear_direct_counter_entry(
+                d_cnt_name=d_cnt_name
+            )
+        return None
+
+    def insert_direct_counter_entry(self, d_cnt_name, match_map,
+                                    priority, cnt_pkt=-1, cnt_byte=-1):
+        """
+        Insert a direct P4 counter entry.
+
+        :param d_cnt_name: name of a direct P4 counter
+        :param match_map: map of match operations
+        :param priority: entry priority
+        :param cnt_pkt: packet count
+        :param cnt_byte: byte count
+        :return: inserted entry
+        """
+        d_cnt = self.get_direct_counter(d_cnt_name)
+        assert d_cnt, \
+            "P4 pipeline does not implement direct counter " + d_cnt_name
+
+        assert match_map,\
+            "Direct counter entry without match operations is not accepted"
+
+        d_cnt_entry = DirectCounterEntry(d_cnt_name)
+
+        for match_k, match_v in match_map.items():
+            d_cnt_entry.table_entry.match[match_k] = match_v
+
+        d_cnt_entry.table_entry.priority = priority
+
+        if cnt_pkt > 0:
+            d_cnt_entry.packet_count = cnt_pkt
+
+        if cnt_byte > 0:
+            d_cnt_entry.byte_count = cnt_byte
+
+        d_cnt_entry.modify()
+        LOGGER.info("Updated direct counter entry: %s", d_cnt_entry)
+
+        return d_cnt_entry
+
+    def clear_direct_counter_entry(self, d_cnt_name):
+        """
+        Clear the counters of a direct counter entry by name.
+
+        :param d_cnt_name: name of a direct P4 counter
+        :return: cleared entry
+        """
+        d_cnt = self.get_direct_counter(d_cnt_name)
+        assert d_cnt, \
+            "P4 pipeline does not implement direct counter " + d_cnt_name
+
+        d_cnt_entry = DirectCounterEntry(d_cnt_name)
+        d_cnt_entry.clear_data()
+        LOGGER.info("Cleared direct counter entry: %s", d_cnt_entry)
+
+        return d_cnt_entry
+
+    def print_direct_counter_entries_summary(self):
+        """
+        Print a summary of a direct P4 counter state.
+        Summary covers:
+        (i) direct counter name,
+        (ii) number of entries in the table, and
+        (iii) a string of \n-separated entry IDs.
+
+        :return: void
+        """
+        if (KEY_DIR_COUNTER not in self.p4_objects) or \
+                not self.p4_objects[KEY_DIR_COUNTER]:
+            LOGGER.warning("No direct counters to print\n")
+            return
+
+        entry = []
+
+        for d_cnt in self.p4_objects[KEY_DIR_COUNTER]:
+            entries = self.get_direct_counter_entries(d_cnt.name)
+            entries_nb = len(entries)
+            entry_ids_str = ",".join(str(e.id) for e in entries) \
+                if entries_nb > 0 else "-"
+            entry.append([d_cnt.name, str(entries_nb), entry_ids_str])
+
+        print(
+            tabulate(
+                entry,
+                headers=[KEY_DIR_COUNTER, "# of entries", "entry ids"],
+                stralign="right",
+                tablefmt="pretty"
+            )
+        )
+        print("\n")
+
+    ############################################################################
+
+    ############################################################################
+    # Meter methods
+    ############################################################################
+    def get_meter_names(self):
+        """
+        Retrieve a list of P4 meter names.
+
+        :return: list of P4 meter names
+        """
+        if KEY_METER not in self.p4_objects:
+            return []
+        return list(meter.name for meter in self.p4_objects[KEY_METER])
+
+    def get_meter_entries(self, meter_name):
+        """
+        Get a list of P4 meters by name.
+
+        :param meter_name: name of a P4 meter
+        :return: list of P4 meters or None
+        """
+        if meter_name not in self.meter_entries:
+            return None
+        self.meter_entries[meter_name].clear()
+        self.meter_entries[meter_name] = []
+
+        try:
+            for count, meter_entry in enumerate(MeterEntry(meter_name).read()):
+                LOGGER.debug(
+                    "Meter %s - Entry %d\n%s", meter_name, count, meter_entry)
+                self.meter_entries[meter_name].append(meter_entry)
+            return self.meter_entries[meter_name]
+        except P4RuntimeException as ex:
+            LOGGER.error(ex)
+            return []
+
+    def meter_entries_to_json(self, meter_name):
+        """
+        Encode all meter entries into a JSON object.
+
+        :param meter_name: meter name
+        :return: JSON object with meter entries
+        """
+        if (KEY_METER not in self.p4_objects) or \
+                not self.p4_objects[KEY_METER]:
+            LOGGER.warning("No meter entries to retrieve\n")
+            return {}
+
+        meter_res = {}
+
+        for meter in self.p4_objects[KEY_METER]:
+            if not meter.name == meter_name:
+                continue
+
+            entries = self.get_meter_entries(meter.name)
+            if len(entries) == 0:
+                continue
+
+            meter_res["meter-name"] = meter_name
+
+            for ent in entries:
+                meter_res["index"] = ent.index
+                meter_res["cir"] = ent.cir
+                meter_res["cburst"] = ent.cburst
+                meter_res["pir"] = ent.pir
+                meter_res["pburst"] = ent.pburst
+
+        return meter_res
+
+    def count_meter_entries(self, meter_name):
+        """
+        Count the number of P4 meter entries by meter name.
+
+        :param meter_name: name of a P4 meter
+        :return: number of P4 meters or negative integer
+        upon missing meter
+        """
+        entries = self.get_meter_entries(meter_name)
+        if entries is None:
+            return -1
+        return len(entries)
+
+    def count_meter_entries_all(self):
+        """
+        Count all entries of a P4 meter.
+
+        :return: number of direct P4 meter entries
+        """
+        total_cnt = 0
+        for meter_name in self.get_meter_names():
+            cnt = self.count_meter_entries(meter_name)
+            if cnt < 0:
+                continue
+            total_cnt += cnt
+        return total_cnt
+
+    def meter_entry_operation_from_json(self,
+                                        json_resource,
+                                        operation: WriteOperation):
+        """
+        Parse a JSON-based meter entry and insert/update/delete it
+        into/from the switch.
+
+        :param json_resource: JSON-based meter entry
+        :param operation: Write operation (i.e., insert, modify, delete)
+        to perform.
+        :return: inserted entry or None in case of parsing error
+        """
+        meter_name = parse_resource_string_from_json(
+            json_resource, "meter-name")
+
+        if operation in [WriteOperation.insert, WriteOperation.update]:
+            index = parse_resource_integer_from_json(
+                json_resource, "index")
+            cir = parse_resource_integer_from_json(
+                json_resource, "committed-information-rate")
+            cburst = parse_resource_integer_from_json(
+                json_resource, "committed-burst-size")
+            pir = parse_resource_integer_from_json(
+                json_resource, "peak-information-rate")
+            pburst = parse_resource_integer_from_json(
+                json_resource, "peak-burst-size")
+
+            LOGGER.debug("Meter entry to insert/update: %s", json_resource)
+            return self.insert_meter_entry(
+                meter_name=meter_name,
+                index=index,
+                cir=cir,
+                cburst=cburst,
+                pir=pir,
+                pburst=pburst
+            )
+        if operation == WriteOperation.delete:
+            LOGGER.debug("Meter entry to delete: %s", json_resource)
+            return self.clear_meter_entry(
+                meter_name=meter_name
+            )
+        return None
+
+    def insert_meter_entry(self, meter_name, index=None,
+                           cir=-1, cburst=-1, pir=-1, pburst=-1):
+        """
+        Insert a P4 meter entry.
+
+        :param meter_name: name of a P4 meter
+        :param index: P4 meter index
+        :param cir: meter's committed information rate
+        :param cburst: meter's committed burst size
+        :param pir: meter's peak information rate
+        :param pburst: meter's peak burst size
+        :return: inserted entry
+        """
+        meter = self.get_meter(meter_name)
+        assert meter, \
+            "P4 pipeline does not implement meter " + meter_name
+
+        meter_entry = MeterEntry(meter_name)
+
+        if index:
+            meter_entry.index = index
+
+        if cir > 0:
+            meter_entry.cir = cir
+
+        if cburst > 0:
+            meter_entry.cburst = cburst
+
+        if pir > 0:
+            meter_entry.pir = pir
+
+        if pburst > 0:
+            meter_entry.pburst = pburst
+
+        meter_entry.modify()
+        LOGGER.info("Updated meter entry: %s", meter_entry)
+
+        return meter_entry
+
+    def clear_meter_entry(self, meter_name):
+        """
+        Clear the rates and sizes of a meter entry by name.
+
+        :param meter_name: name of a P4 meter
+        :return: cleared entry
+        """
+        meter = self.get_meter(meter_name)
+        assert meter, \
+            "P4 pipeline does not implement meter " + meter_name
+
+        meter_entry = MeterEntry(meter_name)
+        meter_entry.clear_config()
+        LOGGER.info("Cleared meter entry: %s", meter_entry)
+
+        return meter_entry
+
+    def print_meter_entries_summary(self):
+        """
+        Print a summary of a P4 meter state.
+        Summary covers:
+        (i) meter name,
+        (ii) number of entries in the table, and
+        (iii) a string of \n-separated entry IDs.
+
+        :return: void
+        """
+        if (KEY_METER not in self.p4_objects) or \
+                not self.p4_objects[KEY_METER]:
+            LOGGER.warning("No meters to print\n")
+            return
+
+        entry = []
+
+        for meter in self.p4_objects[KEY_METER]:
+            entries = self.get_meter_entries(meter.name)
+            entries_nb = len(entries)
+            entry_ids_str = ",".join(str(e.id) for e in entries) \
+                if entries_nb > 0 else "-"
+            entry.append([meter.name, str(entries_nb), entry_ids_str])
+
+        print(
+            tabulate(
+                entry,
+                headers=[KEY_METER, "# of entries", "entry ids"],
+                stralign="right",
+                tablefmt="pretty"
+            )
+        )
+        print("\n")
+
+    ############################################################################
+
+    ############################################################################
+    # Direct meter methods
+    ############################################################################
+    def get_direct_meter_names(self):
+        """
+        Retrieve a list of direct P4 meter names.
+
+        :return: list of direct P4 meter names
+        """
+        if KEY_DIR_METER not in self.p4_objects:
+            return []
+        return list(d_meter.name for d_meter in self.p4_objects[KEY_DIR_METER])
+
+    def get_direct_meter_entries(self, d_meter_name):
+        """
+        Get a list of direct P4 meters by name.
+
+        :param d_meter_name: name of a direct P4 meter
+        :return: list of direct P4 meters or None
+        """
+        if d_meter_name not in self.direct_meter_entries:
+            return None
+        self.direct_meter_entries[d_meter_name].clear()
+        self.direct_meter_entries[d_meter_name] = []
+
+        try:
+            for count, d_meter_entry in enumerate(
+                    MeterEntry(d_meter_name).read()):
+                LOGGER.debug(
+                    "Direct meter %s - Entry %d\n%s",
+                    d_meter_name, count, d_meter_entry)
+                self.direct_meter_entries[d_meter_name].append(d_meter_entry)
+            return self.direct_meter_entries[d_meter_name]
+        except P4RuntimeException as ex:
+            LOGGER.error(ex)
+            return []
+
+    def direct_meter_entries_to_json(self, d_meter_name):
+        """
+        Encode all direct meter entries into a JSON object.
+
+        :param d_meter_name: direct meter name
+        :return: JSON object with direct meter entries
+        """
+        if (KEY_DIR_METER not in self.p4_objects) or \
+                not self.p4_objects[KEY_DIR_METER]:
+            LOGGER.warning("No direct meter entries to retrieve\n")
+            return {}
+
+        d_meter_res = {}
+
+        for d_meter in self.p4_objects[KEY_DIR_METER]:
+            if not d_meter.name == d_meter_name:
+                continue
+
+            entries = self.get_direct_meter_entries(d_meter.name)
+            if len(entries) == 0:
+                continue
+
+            d_meter_res["direct-meter-name"] = d_meter_name
+
+            for ent in entries:
+                d_meter_res["match-fields"] = []
+                for k, v in ent.table_entry.match.items():
+                    d_meter_res["match-fields"].append(
+                        {
+                            "match-field": k,
+                            "match-value": v
+                        }
+                    )
+                d_meter_res["cir"] = ent.cir
+                d_meter_res["cburst"] = ent.cburst
+                d_meter_res["pir"] = ent.pir
+                d_meter_res["pburst"] = ent.pburst
+
+        return d_meter_res
+
+    def count_direct_meter_entries(self, d_meter_name):
+        """
+        Count the number of direct P4 meter entries by meter name.
+
+        :param d_meter_name: name of a direct P4 meter
+        :return: number of direct P4 meters or negative integer
+        upon missing direct meter
+        """
+        entries = self.get_direct_meter_entries(d_meter_name)
+        if entries is None:
+            return -1
+        return len(entries)
+
+    def count_direct_meter_entries_all(self):
+        """
+        Count all entries of a direct P4 meter.
+
+        :return: number of direct P4 meter entries
+        """
+        total_cnt = 0
+        for d_meter_name in self.get_direct_meter_names():
+            cnt = self.count_direct_meter_entries(d_meter_name)
+            if cnt < 0:
+                continue
+            total_cnt += cnt
+        return total_cnt
+
+    def direct_meter_entry_operation_from_json(self,
+                                               json_resource,
+                                               operation: WriteOperation):
+        """
+        Parse a JSON-based direct meter entry and insert/update/delete it
+        into/from the switch.
+
+        :param json_resource: JSON-based direct meter entry
+        :param operation: Write operation (i.e., insert, modify, delete)
+        to perform.
+        :return: inserted entry or None in case of parsing error
+        """
+        d_meter_name = parse_resource_string_from_json(
+            json_resource, "direct-meter-name")
+
+        if operation in [WriteOperation.insert, WriteOperation.update]:
+            match_map = parse_match_operations_from_json(json_resource)
+            cir = parse_resource_integer_from_json(
+                json_resource, "committed-information-rate")
+            cburst = parse_resource_integer_from_json(
+                json_resource, "committed-burst-size")
+            pir = parse_resource_integer_from_json(
+                json_resource, "peak-information-rate")
+            pburst = parse_resource_integer_from_json(
+                json_resource, "peak-burst-size")
+
+            LOGGER.debug(
+                "Direct meter entry to insert/update: %s", json_resource)
+            return self.insert_direct_meter_entry(
+                d_meter_name=d_meter_name,
+                match_map=match_map,
+                cir=cir,
+                cburst=cburst,
+                pir=pir,
+                pburst=pburst
+            )
+        if operation == WriteOperation.delete:
+            LOGGER.debug("Direct meter entry to delete: %s", json_resource)
+            return self.clear_direct_meter_entry(
+                d_meter_name=d_meter_name
+            )
+        return None
+
+    def insert_direct_meter_entry(self, d_meter_name, match_map,
+                                  cir=-1, cburst=-1, pir=-1, pburst=-1):
+        """
+        Insert a direct P4 meter entry.
+
+        :param d_meter_name: name of a direct P4 meter
+        :param match_map: map of P4 table match operations
+        :param cir: meter's committed information rate
+        :param cburst: meter's committed burst size
+        :param pir: meter's peak information rate
+        :param pburst: meter's peak burst size
+        :return: inserted entry
+        """
+        d_meter = self.get_direct_meter(d_meter_name)
+        assert d_meter, \
+            "P4 pipeline does not implement direct meter " + d_meter_name
+
+        assert match_map,\
+            "Direct meter entry without match operations is not accepted"
+
+        d_meter_entry = DirectMeterEntry(d_meter_name)
+
+        for match_k, match_v in match_map.items():
+            d_meter_entry.table_entry.match[match_k] = match_v
+
+        if cir > 0:
+            d_meter_entry.cir = cir
+
+        if cburst > 0:
+            d_meter_entry.cburst = cburst
+
+        if pir > 0:
+            d_meter_entry.pir = pir
+
+        if pburst > 0:
+            d_meter_entry.pburst = pburst
+
+        d_meter_entry.modify()
+        LOGGER.info("Updated direct meter entry: %s", d_meter_entry)
+
+        return d_meter_entry
+
+    def clear_direct_meter_entry(self, d_meter_name):
+        """
+        Clear the rates and sizes of a direct meter entry by name.
+
+        :param d_meter_name: name of a direct P4 meter
+        :return: cleared entry
+        """
+        d_meter = self.get_direct_meter(d_meter_name)
+        assert d_meter, \
+            "P4 pipeline does not implement direct meter " + d_meter_name
+
+        d_meter_entry = DirectMeterEntry(d_meter_name)
+        d_meter_entry.clear_config()
+        LOGGER.info("Cleared direct meter entry: %s", d_meter_entry)
+
+        return d_meter_entry
+
+    def print_direct_meter_entries_summary(self):
+        """
+        Print a summary of a direct P4 meter state.
+        Summary covers:
+        (i) direct meter name,
+        (ii) number of entries in the table, and
+        (iii) a string of \n-separated entry IDs.
+
+        :return: void
+        """
+        if (KEY_DIR_METER not in self.p4_objects) or \
+                not self.p4_objects[KEY_DIR_METER]:
+            LOGGER.warning("No direct meters to print\n")
+            return
+
+        entry = []
+
+        for d_meter in self.p4_objects[KEY_DIR_METER]:
+            entries = self.get_direct_meter_entries(d_meter.name)
+            entries_nb = len(entries)
+            entry_ids_str = ",".join(str(e.id) for e in entries) \
+                if entries_nb > 0 else "-"
+            entry.append([d_meter.name, str(entries_nb), entry_ids_str])
+
+        print(
+            tabulate(
+                entry,
+                headers=[KEY_DIR_METER, "# of entries", "entry ids"],
+                stralign="right",
+                tablefmt="pretty"
+            )
+        )
+        print("\n")
+
+    ############################################################################
+
+    ############################################################################
+    # Action profile member
+    ############################################################################
+    def get_action_profile_names(self):
+        """
+        Retrieve a list of action profile names.
+
+        :return: list of action profile names
+        """
+        if KEY_ACTION_PROFILE not in self.p4_objects:
+            return []
+        return list(ap_name for ap_name in self.p4_objects[KEY_ACTION_PROFILE])
+
+    def get_action_prof_member_entries(self, ap_name):
+        """
+        Get a list of action profile members by name.
+
+        :param ap_name: name of a P4 action profile
+        :return: list of P4 action profile members
+        """
+        if ap_name not in self.action_profile_members:
+            return None
+        self.action_profile_members[ap_name].clear()
+        self.action_profile_members[ap_name] = []
+
+        try:
+            for count, ap_entry in enumerate(
+                    ActionProfileMember(ap_name).read()):
+                LOGGER.debug(
+                    "Action profile member %s - Entry %d\n%s",
+                    ap_name, count, ap_entry)
+                self.action_profile_members[ap_name].append(ap_entry)
+            return self.action_profile_members[ap_name]
+        except P4RuntimeException as ex:
+            LOGGER.error(ex)
+            return []
+
+    def action_prof_member_entries_to_json(self, ap_name):
+        """
+        Encode all action profile members into a JSON object.
+
+        :param ap_name: name of a P4 action profile
+        :return: JSON object with action profile member entries
+        """
+        if (KEY_ACTION_PROFILE not in self.p4_objects) or \
+                not self.p4_objects[KEY_ACTION_PROFILE]:
+            LOGGER.warning("No action profile member entries to retrieve\n")
+            return {}
+
+        ap_res = {}
+
+        for act_p in self.p4_objects[KEY_ACTION_PROFILE]:
+            if not act_p.name == ap_name:
+                continue
+
+            ap_res["action-profile-name"] = ap_name
+
+            entries = self.get_action_prof_member_entries(ap_name)
+            for ent in entries:
+                action = ent.action
+                action_name = CONTEXT.get_name_from_id(action.id)
+                ap_res["action"] = action_name
+                ap_res["action-params"] = []
+                for k, v in action.items():
+                    ap_res["action-params"].append(
+                        {
+                            "param": k,
+                            "value": v
+                        }
+                    )
+
+                ap_res["member-id"] = ent.member_id
+
+        return ap_res
+
+    def count_action_prof_member_entries(self, ap_name):
+        """
+        Count the number of action profile members by name.
+
+        :param ap_name: name of a P4 action profile
+        :return: number of action profile members or negative integer
+        upon missing member
+        """
+        entries = self.get_action_prof_member_entries(ap_name)
+        if entries is None:
+            return -1
+        return len(entries)
+
+    def count_action_prof_member_entries_all(self):
+        """
+        Count all action profile member entries.
+
+        :return: number of action profile member entries
+        """
+        total_cnt = 0
+        for ap_name in self.get_action_profile_names():
+            cnt = self.count_action_prof_member_entries(ap_name)
+            if cnt < 0:
+                continue
+            total_cnt += cnt
+        return total_cnt
+
+    def action_prof_member_entry_operation_from_json(self,
+                                                     json_resource,
+                                                     operation: WriteOperation):
+        """
+        Parse a JSON-based action profile member entry and insert/update/delete
+        it into/from the switch.
+
+        :param json_resource: JSON-based action profile member entry
+        :param operation: Write operation (i.e., insert, modify, delete)
+        to perform.
+        :return: inserted entry or None in case of parsing error
+        """
+        ap_name = parse_resource_string_from_json(
+            json_resource, "action-profile-name")
+        member_id = parse_resource_integer_from_json(json_resource, "member-id")
+        action_name = parse_resource_string_from_json(
+            json_resource, "action-name")
+
+        if operation in [WriteOperation.insert, WriteOperation.update]:
+            action_params = parse_action_parameters_from_json(json_resource)
+
+            LOGGER.debug(
+                "Action profile member entry to insert/update: %s",
+                json_resource)
+            return self.insert_action_prof_member_entry(
+                ap_name=ap_name,
+                member_id=member_id,
+                action_name=action_name,
+                action_params=action_params
+            )
+        if operation == WriteOperation.delete:
+            LOGGER.debug(
+                "Action profile member entry to delete: %s", json_resource)
+            return self.delete_action_prof_member_entry(
+                ap_name=ap_name,
+                member_id=member_id,
+                action_name=action_name
+            )
+        return None
+
+    def insert_action_prof_member_entry(self, ap_name, member_id,
+                                        action_name, action_params):
+        """
+        Insert a P4 action profile member entry.
+
+        :param ap_name: name of a P4 action profile
+        :param member_id: action profile member id
+        :param action_name: P4 action name
+        :param action_params: map of P4 action parameters
+        :return: inserted entry
+        """
+        act_p = self.get_action_profile(ap_name)
+        assert act_p, \
+            "P4 pipeline does not implement action profile " + ap_name
+
+        ap_member_entry = ActionProfileMember(ap_name)(
+            member_id=member_id, action=action_name)
+
+        for action_k, action_v in action_params.items():
+            ap_member_entry.action[action_k] = action_v
+
+        ex_msg = ""
+        try:
+            ap_member_entry.insert()
+            LOGGER.info(
+                "Inserted action profile member entry: %s", ap_member_entry)
+        except P4RuntimeWriteException as ex:
+            ex_msg = str(ex)
+        except P4RuntimeException as ex:
+            raise P4RuntimeException from ex
+
+        # Entry exists, needs to be modified
+        if "ALREADY_EXISTS" in ex_msg:
+            ap_member_entry.modify()
+            LOGGER.info(
+                "Updated action profile member entry: %s", ap_member_entry)
+
+        return ap_member_entry
+
+    def delete_action_prof_member_entry(self, ap_name, member_id, action_name):
+        """
+        Delete a P4 action profile member entry.
+
+        :param ap_name: name of a P4 action profile
+        :param member_id: action profile member id
+        :param action_name: P4 action name
+        :return: deleted entry
+        """
+        act_p = self.get_action_profile(ap_name)
+        assert act_p, \
+            "P4 pipeline does not implement action profile " + ap_name
+
+        ap_member_entry = ActionProfileMember(ap_name)(
+            member_id=member_id, action=action_name)
+        ap_member_entry.delete()
+        LOGGER.info("Deleted action profile member entry: %s", ap_member_entry)
+
+        return ap_member_entry
+
+    def print_action_prof_members_summary(self):
+        """
+        Print a summary of a P4 action profile member state.
+        Summary covers:
+        (i) action profile member id,
+        (ii) number of entries in the table, and
+        (iii) a string of \n-separated entry IDs.
+
+        :return: void
+        """
+        if (KEY_ACTION_PROFILE not in self.p4_objects) or \
+                not self.p4_objects[KEY_ACTION_PROFILE]:
+            LOGGER.warning("No action profile members to print\n")
+            return
+
+        entry = []
+
+        for ap_name in self.p4_objects[KEY_ACTION_PROFILE]:
+            entries = self.get_action_prof_member_entries(ap_name)
+            entries_nb = len(entries)
+            entry_ids_str = ",".join(str(e.member_id) for e in entries) \
+                if entries_nb > 0 else "-"
+            entry.append([ap_name, str(entries_nb), entry_ids_str])
+
+        print(
+            tabulate(
+                entry,
+                headers=["action profile member", "# of entries", "entry ids"],
+                stralign="right",
+                tablefmt="pretty"
+            )
+        )
+        print("\n")
+
+    def print_action_prof_member_entries(self, ap_name):
+        """
+        Print all entries of a P4 action profile member.
+
+        :param ap_name: name of a P4 action profile
+        :return: void
+        """
+        if (KEY_ACTION_PROFILE not in self.p4_objects) or \
+                not self.p4_objects[KEY_ACTION_PROFILE]:
+            LOGGER.warning("No action profile member entries to print\n")
+            return
+
+        for act_p in self.p4_objects[KEY_ACTION_PROFILE]:
+            if not act_p.name == ap_name:
+                continue
+
+            entry = []
+
+            entries = self.get_action_prof_member_entries(ap_name)
+            for ent in entries:
+                member_id = ent.member_id
+                action = ent.action
+                action_name = CONTEXT.get_name_from_id(action.id)
+
+                entry.append([ap_name, str(member_id), action_name])
+
+            if not entry:
+                entry.append([ap_name] + ["-"] * 2)
+
+            print(
+                tabulate(
+                    entry,
+                    headers=["action profile member", "member id", "action"],
+                    stralign="right",
+                    tablefmt="pretty"
+                )
+            )
+            print("\n")
+
+    ############################################################################
+    # Action profile group
+    ############################################################################
+    def get_action_prof_group_entries(self, ap_name):
+        """
+        Get a list of action profile groups by name.
+
+        :param ap_name: name of a P4 action profile
+        :return: list of P4 action profile groups
+        """
+        if ap_name not in self.action_profile_groups:
+            return None
+        self.action_profile_groups[ap_name].clear()
+        self.action_profile_groups[ap_name] = []
+
+        try:
+            for count, ap_entry in enumerate(
+                    ActionProfileGroup(ap_name).read()):
+                LOGGER.debug("Action profile group %s - Entry %d\n%s",
+                             ap_name, count, ap_entry)
+                self.action_profile_groups[ap_name].append(ap_entry)
+            return self.action_profile_groups[ap_name]
+        except P4RuntimeException as ex:
+            LOGGER.error(ex)
+            return []
+
+    def count_action_prof_group_entries(self, ap_name):
+        """
+        Count the number of action profile groups by name.
+
+        :param ap_name: name of a P4 action profile
+        :return: number of action profile groups or negative integer
+        upon missing group
+        """
+        entries = self.get_action_prof_group_entries(ap_name)
+        if entries is None:
+            return -1
+        return len(entries)
+
+    def count_action_prof_group_entries_all(self):
+        """
+        Count all action profile group entries.
+
+        :return: number of action profile group entries
+        """
+        total_cnt = 0
+        for ap_name in self.get_action_profile_names():
+            cnt = self.count_action_prof_group_entries(ap_name)
+            if cnt < 0:
+                continue
+            total_cnt += cnt
+        return total_cnt
+
+    def action_prof_group_entries_to_json(self, ap_name):
+        """
+        Encode all action profile groups into a JSON object.
+
+        :param ap_name: name of a P4 action profile
+        :return: JSON object with action profile group entries
+        """
+        if (KEY_ACTION_PROFILE not in self.p4_objects) or \
+                not self.p4_objects[KEY_ACTION_PROFILE]:
+            LOGGER.warning("No action profile group entries to retrieve\n")
+            return {}
+
+        ap_res = {}
+
+        for act_p in self.p4_objects[KEY_ACTION_PROFILE]:
+            if not act_p.name == ap_name:
+                continue
+
+            ap_res["action-profile-name"] = ap_name
+
+            entries = self.get_action_prof_group_entries(ap_name)
+            for ent in entries:
+                ap_res["group-id"] = ent.group_id
+                ap_res["members"] = []
+                for mem in ent.members:
+                    ap_res["members"].append(
+                        {
+                            "member": mem
+                        }
+                    )
+
+        return ap_res
+
+    def action_prof_group_entry_operation_from_json(self,
+                                                    json_resource,
+                                                    operation: WriteOperation):
+        """
+        Parse a JSON-based action profile group entry and insert/update/delete
+        it into/from the switch.
+
+        :param json_resource: JSON-based action profile group entry
+        :param operation: Write operation (i.e., insert, modify, delete)
+        to perform.
+        :return: inserted entry or None in case of parsing error
+        """
+        ap_name = parse_resource_string_from_json(
+            json_resource, "action-profile-name")
+        group_id = parse_resource_integer_from_json(json_resource, "group-id")
+
+        if operation in [WriteOperation.insert, WriteOperation.update]:
+            members = parse_integer_list_from_json(
+                json_resource, "members", "member")
+
+            LOGGER.debug(
+                "Action profile group entry to insert/update: %s",
+                json_resource)
+            return self.insert_action_prof_group_entry(
+                ap_name=ap_name,
+                group_id=group_id,
+                members=members
+            )
+        if operation == WriteOperation.delete:
+            LOGGER.debug(
+                "Action profile group entry to delete: %s", json_resource)
+            return self.delete_action_prof_group_entry(
+                ap_name=ap_name,
+                group_id=group_id
+            )
+        return None
+
+    def insert_action_prof_group_entry(self, ap_name, group_id, members=None):
+        """
+        Insert a P4 action profile group entry.
+
+        :param ap_name: name of a P4 action profile
+        :param group_id: action profile group id
+        :param members: list of associated action profile members
+        :return: inserted entry
+        """
+        ap = self.get_action_profile(ap_name)
+        assert ap, \
+            "P4 pipeline does not implement action profile " + ap_name
+
+        ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id)
+
+        if members:
+            for m in members:
+                ap_group_entry.add(member_id=m)
+
+        ex_msg = ""
+        try:
+            ap_group_entry.insert()
+            LOGGER.info(
+                "Inserted action profile group entry: %s", ap_group_entry)
+        except P4RuntimeWriteException as ex:
+            ex_msg = str(ex)
+        except P4RuntimeException as ex:
+            raise P4RuntimeException from ex
+
+        # Entry exists, needs to be modified
+        if "ALREADY_EXISTS" in ex_msg:
+            ap_group_entry.modify()
+            LOGGER.info(
+                "Updated action profile group entry: %s", ap_group_entry)
+
+        return ap_group_entry
+
+    def delete_action_prof_group_entry(self, ap_name, group_id):
+        """
+        Delete a P4 action profile group entry.
+
+        :param ap_name: name of a P4 action profile
+        :param group_id: action profile group id
+        :return: deleted entry
+        """
+        ap = self.get_action_profile(ap_name)
+        assert ap, \
+            "P4 pipeline does not implement action profile " + ap_name
+
+        ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id)
+        ap_group_entry.delete()
+        LOGGER.info("Deleted action profile group entry: %s", ap_group_entry)
+
+        return ap_group_entry
+
+    def clear_action_prof_group_entry(self, ap_name, group_id):
+        """
+        Clean a P4 action profile group entry.
+
+        :param ap_name: name of a P4 action profile
+        :param group_id: action profile group id
+        :return: cleaned entry
+        """
+        ap = self.get_action_profile(ap_name)
+        assert ap, \
+            "P4 pipeline does not implement action profile " + ap_name
+
+        ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id)
+        ap_group_entry.clear()
+        LOGGER.info("Cleared action profile group entry: %s", ap_group_entry)
+
+        return ap_group_entry
+
+    def print_action_prof_groups_summary(self):
+        """
+        Print a summary of a P4 action profile group state.
+        Summary covers:
+        (i) action profile group id,
+        (ii) number of entries in the table, and
+        (iii) a string of \n-separated entry IDs.
+
+        :return: void
+        """
+        if (KEY_ACTION_PROFILE not in self.p4_objects) or \
+                not self.p4_objects[KEY_ACTION_PROFILE]:
+            LOGGER.warning("No action profile groups to print\n")
+            return
+
+        entry = []
+
+        for ap_name in self.p4_objects[KEY_ACTION_PROFILE]:
+            entries = self.get_action_prof_group_entries(ap_name)
+            entries_nb = len(entries)
+            entry_ids_str = ",".join(str(e.group_id) for e in entries) \
+                if entries_nb > 0 else "-"
+            entry.append([ap_name, str(entries_nb), entry_ids_str])
+
+        print(
+            tabulate(
+                entry,
+                headers=["action profile group", "# of entries", "entry ids"],
+                stralign="right",
+                tablefmt="pretty"
+            )
+        )
+        print("\n")
+
+    def print_action_prof_group_entries(self, ap_name):
+        """
+        Print all entries of a P4 action profile group.
+
+        :param ap_name: name of a P4 action profile
+        :return: void
+        """
+        if (KEY_ACTION_PROFILE not in self.p4_objects) or \
+                not self.p4_objects[KEY_ACTION_PROFILE]:
+            LOGGER.warning("No action profile group entries to print\n")
+            return
+
+        for ap in self.p4_objects[KEY_ACTION_PROFILE]:
+            if not ap.name == ap_name:
+                continue
+
+            entry = []
+
+            entries = self.get_action_prof_group_entries(ap_name)
+            for e in entries:
+                group_id = e.group_id
+                members_str = "\n".join(m for m in e.members)
+                entry.append([ap_name, str(group_id), members_str])
+
+            if not entry:
+                entry.append([ap_name] + ["-"] * 2)
+
+            print(
+                tabulate(
+                    entry,
+                    headers=[
+                        "action profile group", "group id", "members"
+                    ],
+                    stralign="right",
+                    tablefmt="pretty"
+                )
+            )
+            print("\n")
+
+    ############################################################################
+    # Packet replication method 1: Multicast group
+    ############################################################################
+    def get_multicast_group_entry(self, group_id):
+        """
+        Get a multicast group entry by group id.
+
+        :param group_id: id of a multicast group
+        :return: multicast group entry or none
+        """
+        if group_id not in self.multicast_groups:
+            return None
+        self.multicast_groups[group_id] = None
+
+        try:
+            mcast_group = MulticastGroupEntry(group_id).read()
+            LOGGER.debug("Multicast group %d\n%s", group_id, mcast_group)
+            self.multicast_groups[group_id] = mcast_group
+            return self.multicast_groups[group_id]
+        except P4RuntimeException as ex:
+            LOGGER.error(ex)
+            return None
+
+    def count_multicast_groups(self):
+        """
+        Count the number of multicast groups.
+
+        :return: number of multicast groups
+        """
+        return len(self.multicast_groups.keys())
+
+    def multicast_group_entries_to_json(self):
+        """
+        Encode all multicast groups into a JSON object.
+
+        :return: JSON object with multicast group entries
+        """
+        if not self.multicast_groups:
+            LOGGER.warning("No multicast group entries to retrieve\n")
+            return {}
+
+        mcast_list_res = []
+
+        for mcast_group in self.multicast_groups.values():
+            mcast_res = {}
+            mcast_res["group-id"] = mcast_group.group_id
+
+            mcast_res["egress-ports"] = []
+            mcast_res["instances"] = []
+            for r in mcast_group.replicas:
+                mcast_res["egress-ports"].append(
+                    {
+                        "egress-port": r.egress_port
+                    }
+                )
+                mcast_res["instances"].append(
+                    {
+                        "instance": r.instance
+                    }
+                )
+            mcast_list_res.append(mcast_res)
+
+        return mcast_list_res
+
+    def multicast_group_entry_operation_from_json(self,
+                                                  json_resource,
+                                                  operation: WriteOperation):
+        """
+        Parse a JSON-based multicast group entry and insert/update/delete it
+        into/from the switch.
+
+        :param json_resource: JSON-based multicast group entry
+        :param operation: Write operation (i.e., insert, modify, delete)
+        to perform.
+        :return: inserted entry or None in case of parsing error
+        """
+        group_id = parse_resource_integer_from_json(json_resource, "group-id")
+
+        if operation in [WriteOperation.insert, WriteOperation.update]:
+            ports = parse_integer_list_from_json(
+                json_resource, "ports", "port")
+
+            LOGGER.debug(
+                "Multicast group entry to insert/update: %s", json_resource)
+            return self.insert_multicast_group_entry(
+                group_id=group_id,
+                ports=ports
+            )
+        if operation == WriteOperation.delete:
+            LOGGER.debug("Multicast group entry to delete: %s", json_resource)
+            return self.delete_multicast_group_entry(
+                group_id=group_id
+            )
+        return None
+
+    def insert_multicast_group_entry(self, group_id, ports):
+        """
+        Insert a new multicast group.
+
+        :param group_id: id of a multicast group
+        :param ports: list of egress ports to multicast
+        :return: inserted multicast group
+        """
+        assert group_id > 0, \
+            "Multicast group " + group_id + " must be > 0"
+        assert ports, \
+            "No multicast group ports are provided"
+
+        mcast_group = MulticastGroupEntry(group_id)
+        for p in ports:
+            mcast_group.add(p, 1)
+
+        ex_msg = ""
+        try:
+            mcast_group.insert()
+            LOGGER.info("Inserted multicast group entry: %s", mcast_group)
+        except P4RuntimeWriteException as ex:
+            ex_msg = str(ex)
+        except P4RuntimeException as ex:
+            raise P4RuntimeException from ex
+
+        # Entry exists, needs to be modified
+        if "ALREADY_EXISTS" in ex_msg:
+            mcast_group.modify()
+            LOGGER.info("Updated multicast group entry: %s", mcast_group)
+
+        self.multicast_groups[group_id] = mcast_group
+
+        return mcast_group
+
+    def delete_multicast_group_entry(self, group_id):
+        """
+        Delete a multicast group by id.
+
+        :param group_id: id of a multicast group
+        :return: deleted multicast group
+        """
+        assert group_id > 0, \
+            "Multicast group " + group_id + " must be > 0"
+
+        mcast_group = MulticastGroupEntry(group_id)
+        mcast_group.delete()
+
+        if group_id in self.multicast_groups:
+            del self.multicast_groups[group_id]
+        LOGGER.info(
+            "Deleted multicast group %d", group_id)
+
+        return mcast_group
+
+    def delete_multicast_group_entries(self):
+        """
+        Delete all multicast groups.
+
+        :return: void
+        """
+        for mcast_group in MulticastGroupEntry().read():
+            gid = mcast_group.group_id
+            mcast_group.delete()
+            del self.multicast_groups[gid]
+
+        assert self.count_multicast_groups() == 0, \
+            "Failed to purge all multicast groups"
+        LOGGER.info("Deleted all multicast groups")
+
+    def print_multicast_groups_summary(self):
+        """
+        Print a summary of a P4 multicast group state.
+        Summary covers:
+        (i) multicast group id,
+        (ii) a string of \n-separated egress ports, and
+        (iii) a string of \n-separated replica instances.
+
+        :return: void
+        """
+        entry = []
+
+        for mcast_group in self.multicast_groups.values():
+            ports_str = "\n".join(
+                str(r.egress_port) for r in mcast_group.replicas)
+            inst_str = "\n".join(
+                str(r.instance) for r in mcast_group.replicas)
+            entry.append([str(mcast_group.group_id), ports_str, inst_str])
+
+        if not entry:
+            entry.append(3 * ["-"])
+
+        print(
+            tabulate(
+                entry,
+                headers=["multicast group id", "egress ports", "instances"],
+                stralign="right",
+                tablefmt="pretty"
+            )
+        )
+        print("\n")
+
+    ############################################################################
+    # Packet replication method 2: Clone session
+    ############################################################################
+    def get_clone_session_entry(self, session_id):
+        """
+        Get a clone session entry by session id.
+
+        :param session_id: id of a clone session
+        :return: clone session entry or none
+        """
+        if session_id not in self.clone_session_entries:
+            return None
+        self.clone_session_entries[session_id] = None
+
+        try:
+            session = CloneSessionEntry(session_id).read()
+            LOGGER.debug("Clone session %d\n%s", session_id, session)
+            self.clone_session_entries[session_id] = session
+            return self.clone_session_entries[session_id]
+        except P4RuntimeException as ex:
+            LOGGER.error(ex)
+            return None
+
+    def count_clone_session_entries(self):
+        """
+        Count the number of clone sessions.
+
+        :return: number of clone sessions
+        """
+        return len(self.clone_session_entries.keys())
+
+    def clone_session_entries_to_json(self):
+        """
+        Encode all clone sessions into a JSON object.
+
+        :return: JSON object with clone session entries
+        """
+        if not self.clone_session_entries:
+            LOGGER.warning("No clone session entries to retrieve\n")
+            return {}
+
+        session_list_res = []
+
+        for session in self.clone_session_entries.values():
+            session_res = {}
+            session_res["session-id"] = session.session_id
+
+            session_res["egress-ports"] = []
+            session_res["instances"] = []
+            for r in session.replicas:
+                session_res["egress-ports"].append(
+                    {
+                        "egress-port": r.egress_port
+                    }
+                )
+                session_res["instances"].append(
+                    {
+                        "instance": r.instance
+                    }
+                )
+            session_list_res.append(session_res)
+
+        return session_list_res
+
+    def clone_session_entry_operation_from_json(self,
+                                                json_resource,
+                                                operation: WriteOperation):
+        """
+        Parse a JSON-based clone session entry and insert/update/delete it
+        into/from the switch.
+
+        :param json_resource: JSON-based clone session entry
+        :param operation: Write operation (i.e., insert, modify, delete)
+        to perform.
+        :return: inserted entry or None in case of parsing error
+        """
+        session_id = parse_resource_integer_from_json(
+            json_resource, "session-id")
+
+        if operation in [WriteOperation.insert, WriteOperation.update]:
+            ports = parse_integer_list_from_json(
+                json_resource, "ports", "port")
+
+            LOGGER.debug(
+                "Clone session entry to insert/update: %s", json_resource)
+            return self.insert_clone_session_entry(
+                session_id=session_id,
+                ports=ports
+            )
+        if operation == WriteOperation.delete:
+            LOGGER.debug(
+                "Clone session entry to delete: %s", json_resource)
+            return self.delete_clone_session_entry(
+                session_id=session_id
+            )
+        return None
+
+    def insert_clone_session_entry(self, session_id, ports):
+        """
+        Insert a new clone session.
+
+        :param session_id: id of a clone session
+        :param ports: list of egress ports to clone session
+        :return: inserted clone session
+        """
+        assert session_id > 0, \
+            "Clone session " + session_id + " must be > 0"
+        assert ports, \
+            "No clone session ports are provided"
+
+        session = CloneSessionEntry(session_id)
+        for p in ports:
+            session.add(p, 1)
+
+        ex_msg = ""
+        try:
+            session.insert()
+            LOGGER.info("Inserted clone session entry: %s", session)
+        except P4RuntimeWriteException as ex:
+            ex_msg = str(ex)
+        except P4RuntimeException as ex:
+            raise P4RuntimeException from ex
+
+        # Entry exists, needs to be modified
+        if "ALREADY_EXISTS" in ex_msg:
+            session.modify()
+            LOGGER.info("Updated clone session entry: %s", session)
+
+        self.clone_session_entries[session_id] = session
+
+        return session
+
+    def delete_clone_session_entry(self, session_id):
+        """
+        Delete a clone session by id.
+
+        :param session_id: id of a clone session
+        :return: deleted clone session
+        """
+        assert session_id > 0, \
+            "Clone session " + session_id + " must be > 0"
+
+        session = CloneSessionEntry(session_id)
+        session.delete()
+
+        if session_id in self.clone_session_entries:
+            del self.clone_session_entries[session_id]
+        LOGGER.info(
+            "Deleted clone session %d", session_id)
+
+        return session
+
+    def delete_clone_session_entries(self):
+        """
+        Delete all clone sessions.
+
+        :return: void
+        """
+        for e in CloneSessionEntry().read():
+            sid = e.session_id
+            e.delete()
+            del self.clone_session_entries[sid]
+
+        assert self.count_multicast_groups() == 0, \
+            "Failed to purge all clone sessions"
+        LOGGER.info("Deleted all clone sessions")
+
+    def print_clone_sessions_summary(self):
+        """
+        Print a summary of a P4 clone session state.
+        Summary covers:
+        (i) clone session id,
+        (ii) a string of \n-separated egress ports, and
+        (iii) a string of \n-separated replica instances.
+
+        :return: void
+        """
+        entry = []
+
+        for session in self.clone_session_entries.values():
+            ports_str = "\n".join(
+                str(r.egress_port) for r in session.replicas)
+            inst_str = "\n".join(
+                str(r.instance) for r in session.replicas)
+            entry.append([str(session.session_id), ports_str, inst_str])
+
+        if not entry:
+            entry.append(3 * ["-"])
+
+        print(
+            tabulate(
+                entry,
+                headers=["clone session id", "egress ports", "instances"],
+                stralign="right",
+                tablefmt="pretty"
+            )
+        )
+        print("\n")
+
+    ############################################################################
+    # Packet replication method 3: Packet in
+    ############################################################################
+    def get_packet_metadata(self, meta_type, attr_name=None, attr_id=None):
+        """
+        Retrieve the pipeline's metadata by metadata type field.
+
+        :param meta_type: metadata type field
+        :param attr_name: metadata name field (optional)
+        :param attr_id: metadata id field (optional)
+        :return: packet metadata
+        """
+        for table in self.__p4info.controller_packet_metadata:
+            pre = table.preamble
+            if pre.name == meta_type:
+                for meta in table.metadata:
+                    if attr_name is not None:
+                        if meta.name == attr_name:
+                            return meta
+                    elif attr_id is not None:
+                        if meta.id == attr_id:
+                            return meta
+        raise AttributeError(
+            f"ControllerPacketMetadata {meta_type} has no metadata "
+            f"{attr_name if attr_name is not None else attr_id} (check P4Info)")
+
+    # TODO: test packet in  # pylint: disable=W0511
+    def create_packet_in(self, payload, metadata=None):
+        """
+        Create a packet-in object.
+
+        :param payload: packet-in payload
+        :param metadata: packet-in metadata (optional)
+        :return: packet-in object
+        """
+        if not self.p4_objects[KEY_CTL_PKT_METADATA]:
+            LOGGER.warning("Cannot create packet in. "
+                           "No controller packet metadata in the pipeline\n")
+            return None
+
+        packet_in = PacketOut()
+        packet_in.payload = payload
+        if metadata:
+            for name, value in metadata.items():
+                p4info_meta = self.get_packet_metadata("packet_in", name)
+                meta = packet_in.metadata.add()
+                meta.metadata_id = p4info_meta.id
+                meta.value = encode(value, p4info_meta.bitwidth)
+        return packet_in
+
+    def send_packet_in(self, payload, metadata=None, timeout=1):
+        """
+        Send a packet-in message.
+        Note that the sniff method is blocking, thus it should be invoked by
+        another thread.
+
+        :param payload: packet-in payload
+        :param metadata: packet-in metadata (optional)
+        :param timeout: packet-in timeout (defaults to 1s)
+        :return: void
+        """
+        packet_in = self.create_packet_in(payload, metadata)
+
+        # TODO: experimental piece of code  # pylint: disable=W0511
+        captured_packet = []
+
+        def _sniff_packet(captured_pkt):
+            """
+            Invoke packet-in sniff method.
+
+            :param captured_pkt: buffer for the packet to be captured
+            :return: void
+            """
+            captured_pkt += packet_in.sniff(timeout=timeout)
+
+        _t = Thread(target=_sniff_packet, args=(captured_packet,))
+        _t.start()
+        # P4Runtime client sends the packet to the switch
+        CLIENT.stream_in_q["packet"].put(packet_in)
+        _t.join()
+        LOGGER.info("Packet-in sent: %s", packet_in)
+
+    ############################################################################
+    # Packet replication method 4: Packet out
+    ############################################################################
+    # TODO: test packet out  # pylint: disable=W0511
+    def create_packet_out(self, payload, metadata=None):
+        """
+        Create a packet-out object.
+
+        :param payload: packet-out payload
+        :param metadata: packet-out metadata (optional)
+        :return: packet-out object
+        """
+        if not self.p4_objects[KEY_CTL_PKT_METADATA]:
+            LOGGER.warning("Cannot create packet out. "
+                           "No controller packet metadata in the pipeline\n")
+            return None
+
+        packet_out = PacketOut()
+        packet_out.payload = payload
+        if metadata:
+            for name, value in metadata.items():
+                p4info_meta = self.get_packet_metadata("packet_out", name)
+                meta = packet_out.metadata.add()
+                meta.metadata_id = p4info_meta.id
+                meta.value = encode(value, p4info_meta.bitwidth)
+        return packet_out
+
+    def send_packet_out(self, payload, metadata=None):
+        """
+        Send a packet-out message.
+
+        :param payload: packet-out payload
+        :param metadata: packet-out metadata (optional)
+        :return: void
+        """
+        packet_out = self.create_packet_out(payload, metadata)
+        packet_out.send()
+        LOGGER.info("Packet-out sent: %s", packet_out)
+
+    ############################################################################
+    # Packet replication method 5: Idle timeout notification
+    ############################################################################
+    # TODO: Support IdleTimeoutNotification  # pylint: disable=W0511
+    ############################################################################
+
+    def print_objects(self):
+        """
+        Print all P4 objects of the installed pipeline.
+
+        :return: void
+        """
+        if not self.p4_objects:
+            self.__discover_objects()
+
+        for obj_name, objects in self.p4_objects.items():
+            entry = []
+
+            for obj in objects:
+                entry.append([obj.name])
+
+            if not entry:
+                entry.append("-")
+            print(
+                tabulate(
+                    entry,
+                    headers=[obj_name],
+                    stralign="right",
+                    tablefmt="pretty"
+                )
+            )
+        print("\n")
+
+
+class P4Object:
+    """
+    P4 object.
+    """
+
+    def __init__(self, obj_type, obj):
+        self.name = obj.preamble.name
+        self.id = obj.preamble.id
+        self._obj_type = obj_type
+        self._obj = obj
+        self.__doc__ = f"""
+A wrapper around the P4Info Protobuf message for
+{obj_type.pretty_name} '{self.name}'.
+You can access any field from the message with <self>.<field name>.
+You can access the name directly with <self>.name.
+You can access the id directly with <self>.id.
+If you need the underlying Protobuf message, you can access it with msg().
+"""
+
+    def __getattr__(self, name):
+        return getattr(self._obj, name)
+
+    def __settattr__(self, name, value):
+        return UserError(
+            f"Operation {name}:{value} not supported")
+
+    def msg(self):
+        """Get Protobuf message object"""
+        return self._obj
+
+    def actions(self):
+        """Print list of actions, only for tables and action profiles."""
+        if self._obj_type == P4Type.table:
+            for action in self._obj.action_refs:
+                print(CONTEXT.get_name_from_id(action.id))
+        elif self._obj_type == P4Type.action_profile:
+            t_id = self._obj.table_ids[0]
+            t_name = CONTEXT.get_name_from_id(t_id)
+            t = CONTEXT.get_table(t_name)
+            for action in t.action_refs:
+                print(CONTEXT.get_name_from_id(action.id))
+        else:
+            raise UserError(
+                "'actions' is only available for tables and action profiles")
+
+
+class P4Objects:
+    """
+    P4 objects.
+    """
+
+    def __init__(self, obj_type):
+        self._obj_type = obj_type
+        self._names = sorted([name for name, _ in CONTEXT.get_objs(obj_type)])
+        self._iter = None
+        self.__doc__ = """
+All the {pnames} in the P4 program.
+To access a specific {pname}, use {p4info}['<name>'].
+You can use this class to iterate over all {pname} instances:
+\tfor x in {p4info}:
+\t\tprint(x.id)
+""".format(pname=obj_type.pretty_name, pnames=obj_type.pretty_names,
+           p4info=obj_type.p4info_name)
+
+    def __getitem__(self, name):
+        obj = CONTEXT.get_obj(self._obj_type, name)
+        if obj is None:
+            raise UserError(
+                f"{self._obj_type.pretty_name} '{name}' does not exist")
+        return P4Object(self._obj_type, obj)
+
+    def __setitem__(self, name, value):
+        raise UserError("Operation not allowed")
+
+    def __iter__(self):
+        self._iter = iter(self._names)
+        return self
+
+    def __next__(self):
+        name = next(self._iter)
+        return self[name]
+
+
+class MatchKey:
+    """
+    P4 match key.
+    """
+
+    def __init__(self, table_name, match_fields):
+        self._table_name = table_name
+        self._fields = OrderedDict()
+        self._fields_suffixes = {}
+        for mf in match_fields:
+            self._add_field(mf)
+        self._mk = OrderedDict()
+        self._set_docstring()
+
+    def _set_docstring(self):
+        self.__doc__ = f"Match key fields for table '{self._table_name}':\n\n"
+        for _, info in self._fields.items():
+            self.__doc__ += str(info)
+        self.__doc__ += """
+Set a field value with <self>['<field_name>'] = '...'
+  * For exact match: <self>['<f>'] = '<value>'
+  * For ternary match: <self>['<f>'] = '<value>&&&<mask>'
+  * For LPM match: <self>['<f>'] = '<value>/<mask>'
+  * For range match: <self>['<f>'] = '<value>..<mask>'
+  * For optional match: <self>['<f>'] = '<value>'
+
+If it's inconvenient to use the whole field name, you can use a unique suffix.
+
+You may also use <self>.set(<f>='<value>')
+\t(<f> must not include a '.' in this case,
+but remember that you can use a unique suffix)
+"""
+
+    def _get_mf(self, name):
+        if name in self._fields:
+            return self._fields[name]
+        if name in self._fields_suffixes:
+            return self._fields[self._fields_suffixes[name]]
+        raise UserError(
+            f"'{name}' is not a valid match field name, nor a valid unique "
+            f"suffix, for table '{self._table_name}'")
+
+    def __setitem__(self, name, value):
+        field_info = self._get_mf(name)
+        self._mk[name] = self._parse_mf(value, field_info)
+        print(self._mk[name])
+
+    def __getitem__(self, name):
+        _ = self._get_mf(name)
+        print(self._mk.get(name, "Unset"))
+
+    def _parse_mf(self, s, field_info):
+        if not isinstance(s, str):
+            raise UserError("Match field value must be a string")
+        if field_info.match_type == p4info_pb2.MatchField.EXACT:
+            return self._parse_mf_exact(s, field_info)
+        if field_info.match_type == p4info_pb2.MatchField.LPM:
+            return self._parse_mf_lpm(s, field_info)
+        if field_info.match_type == p4info_pb2.MatchField.TERNARY:
+            return self._parse_mf_ternary(s, field_info)
+        if field_info.match_type == p4info_pb2.MatchField.RANGE:
+            return self._parse_mf_range(s, field_info)
+        if field_info.match_type == p4info_pb2.MatchField.OPTIONAL:
+            return self._parse_mf_optional(s, field_info)
+        raise UserError(
+            f"Unsupported match type for field:\n{field_info}")
+
+    def _parse_mf_exact(self, s, field_info):
+        v = encode(s.strip(), field_info.bitwidth)
+        return self._sanitize_and_convert_mf_exact(v, field_info)
+
+    def _sanitize_and_convert_mf_exact(self, value, field_info):
+        mf = p4runtime_pb2.FieldMatch()
+        mf.field_id = field_info.id
+        mf.exact.value = make_canonical_if_option_set(value)
+        return mf
+
+    def _parse_mf_optional(self, s, field_info):
+        v = encode(s.strip(), field_info.bitwidth)
+        return self._sanitize_and_convert_mf_optional(v, field_info)
+
+    def _sanitize_and_convert_mf_optional(self, value, field_info):
+        mf = p4runtime_pb2.FieldMatch()
+        mf.field_id = field_info.id
+        mf.optional.value = make_canonical_if_option_set(value)
+        return mf
+
+    def _parse_mf_lpm(self, s, field_info):
+        try:
+            prefix, length = s.split('/')
+            prefix, length = prefix.strip(), length.strip()
+        except ValueError:
+            prefix = s
+            length = str(field_info.bitwidth)
+
+        prefix = encode(prefix, field_info.bitwidth)
+        try:
+            length = int(length)
+        except ValueError as ex:
+            raise UserError(f"'{length}' is not a valid prefix length") from ex
+
+        return self._sanitize_and_convert_mf_lpm(prefix, length, field_info)
+
+    def _sanitize_and_convert_mf_lpm(self, prefix, length, field_info):
+        if length == 0:
+            raise UserError(
+                "Ignoring LPM don't care match (prefix length of 0) "
+                "as per P4Runtime spec")
+
+        mf = p4runtime_pb2.FieldMatch()
+        mf.field_id = field_info.id
+        mf.lpm.prefix_len = length
+
+        first_byte_masked = length // 8
+        if first_byte_masked == len(prefix):
+            mf.lpm.value = prefix
+            return mf
+
+        barray = bytearray(prefix)
+        transformed = False
+        r = length % 8
+        byte_mask = 0xff & ((0xff << (8 - r)))
+        if barray[first_byte_masked] & byte_mask != barray[first_byte_masked]:
+            transformed = True
+            barray[first_byte_masked] = barray[first_byte_masked] & byte_mask
+
+        for i in range(first_byte_masked + 1, len(prefix)):
+            if barray[i] != 0:
+                transformed = True
+                barray[i] = 0
+        if transformed:
+            print("LPM value was transformed to conform to the P4Runtime spec "
+                  "(trailing bits must be unset)")
+        mf.lpm.value = bytes(make_canonical_if_option_set(barray))
+        return mf
+
+    def _parse_mf_ternary(self, s, field_info):
+        try:
+            value, mask = s.split('&&&')
+            value, mask = value.strip(), mask.strip()
+        except ValueError:
+            value = s.strip()
+            mask = "0b" + ("1" * field_info.bitwidth)
+
+        value = encode(value, field_info.bitwidth)
+        mask = encode(mask, field_info.bitwidth)
+
+        return self._sanitize_and_convert_mf_ternary(value, mask, field_info)
+
+    def _sanitize_and_convert_mf_ternary(self, value, mask, field_info):
+        if int.from_bytes(mask, byteorder='big') == 0:
+            raise UserError(
+                "Ignoring ternary don't care match (mask of 0s) "
+                "as per P4Runtime spec")
+
+        mf = p4runtime_pb2.FieldMatch()
+        mf.field_id = field_info.id
+
+        barray = bytearray(value)
+        transformed = False
+        for i in range(len(value)):
+            if barray[i] & mask[i] != barray[i]:
+                transformed = True
+                barray[i] = barray[i] & mask[i]
+        if transformed:
+            print("Ternary value was transformed to conform to "
+                  "the P4Runtime spec (masked off bits must be unset)")
+        mf.ternary.value = bytes(
+            make_canonical_if_option_set(barray))
+        mf.ternary.mask = make_canonical_if_option_set(mask)
+        return mf
+
+    def _parse_mf_range(self, s, field_info):
+        try:
+            start, end = s.split('..')
+            start, end = start.strip(), end.strip()
+        except ValueError as ex:
+            raise UserError(f"'{s}' does not specify a valid range, "
+                            f"use '<start>..<end>'") from ex
+
+        start = encode(start, field_info.bitwidth)
+        end = encode(end, field_info.bitwidth)
+
+        return self._sanitize_and_convert_mf_range(start, end, field_info)
+
+    def _sanitize_and_convert_mf_range(self, start, end, field_info):
+        start_ = int.from_bytes(start, byteorder='big')
+        end_ = int.from_bytes(end, byteorder='big')
+        if start_ > end_:
+            raise UserError("Invalid range match: start is greater than end")
+        if start_ == 0 and end_ == ((1 << field_info.bitwidth) - 1):
+            raise UserError(
+                "Ignoring range don't care match (all possible values) "
+                "as per P4Runtime spec")
+        mf = p4runtime_pb2.FieldMatch()
+        mf.field_id = field_info.id
+        mf.range.low = make_canonical_if_option_set(start)
+        mf.range.high = make_canonical_if_option_set(end)
+        return mf
+
+    def _add_field(self, field_info):
+        self._fields[field_info.name] = field_info
+        self._recompute_suffixes()
+
+    def _recompute_suffixes(self):
+        suffixes = {}
+        suffix_count = Counter()
+        for fname in self._fields:
+            suffix = None
+            for s in reversed(fname.split(".")):
+                suffix = s if suffix is None else s + "." + suffix
+                suffixes[suffix] = fname
+                suffix_count[suffix] += 1
+        for suffix, c in suffix_count.items():
+            if c > 1:
+                del suffixes[suffix]
+        self._fields_suffixes = suffixes
+
+    def __str__(self):
+        return '\n'.join([str(mf) for name, mf in self._mk.items()])
+
+    def fields(self):
+        """
+        Return a list of match fields.
+
+        :return: list of match fields or None
+        """
+        fields = []
+        for name, _ in self._mk.items():
+            fields.append(name)
+        return fields
+
+    def value(self, field_name):
+        """
+        Get the value of a match field.
+
+        :param field_name: match field name
+        :return: match field value
+        """
+        for name, info in self._fields.items():
+            if name != field_name:
+                continue
+            if info.match_type == p4info_pb2.MatchField.EXACT:
+                return self._mk[name].exact.value.hex()
+            if info.match_type == p4info_pb2.MatchField.LPM:
+                return self._mk[name].lpm.value.hex()
+            if info.match_type == p4info_pb2.MatchField.TERNARY:
+                return self._mk[name].ternary.value.hex()
+            if info.match_type == p4info_pb2.MatchField.RANGE:
+                return self._mk[name].range.value.hex()
+            if info.match_type == p4info_pb2.MatchField.OPTIONAL:
+                return self._mk[name].optional.value.hex()
+        return None
+
+    def match_type(self, field_name):
+        """
+        Get the type of a match field.
+
+        :param field_name: match field name
+        :return: match field type
+        """
+        for name, info in self._fields.items():
+            if name not in field_name:
+                continue
+            return info.match_type
+        return None
+
+    def set(self, **kwargs):
+        """
+        Set match field parameter.
+
+        :param kwargs: parameters
+        :return: void
+        """
+        for name, value in kwargs.items():
+            self[name] = value
+
+    def clear(self):
+        """
+        Clear all match fields.
+
+        :return: void
+        """
+        self._mk.clear()
+
+    def _count(self):
+        return len(self._mk)
+
+
+class Action:
+    """
+    P4 action.
+    """
+
+    def __init__(self, action_name=None):
+        self._init = False
+        if action_name is None:
+            raise UserError("Please provide name for action")
+        self.action_name = action_name
+        action_info = CONTEXT.get_action(action_name)
+        if action_info is None:
+            raise UserError(f"Unknown action '{action_name}'")
+        self._action_id = action_info.preamble.id
+        self._params = OrderedDict()
+        for param in action_info.params:
+            self._params[param.name] = param
+        self._action_info = action_info
+        self._param_values = OrderedDict()
+        self._set_docstring()
+        self._init = True
+
+    def _set_docstring(self):
+        self.__doc__ = f"Action parameters for action '{self.action_name}':\n\n"
+        for _, info in self._params.items():
+            self.__doc__ += str(info)
+        self.__doc__ += "\n\n"
+        self.__doc__ += "Set a param value with " \
+                        "<self>['<param_name>'] = '<value>'\n"
+        self.__doc__ += "You may also use <self>.set(<param_name>='<value>')\n"
+
+    def _get_param(self, name):
+        if name not in self._params:
+            raise UserError("'{name}' is not a valid action parameter name "
+                            "for action '{self._action_name}'")
+        return self._params[name]
+
+    def __setattr__(self, name, value):
+        if name[0] == "_" or not self._init:
+            super().__setattr__(name, value)
+            return
+        if name == "action_name":
+            raise UserError("Cannot change action name")
+        super().__setattr__(name, value)
+
+    def __setitem__(self, name, value):
+        param_info = self._get_param(name)
+        self._param_values[name] = self._parse_param(value, param_info)
+        print(self._param_values[name])
+
+    def __getitem__(self, name):
+        _ = self._get_param(name)
+        print(self._param_values.get(name, "Unset"))
+
+    def _parse_param(self, s, param_info):
+        if not isinstance(s, str):
+            raise UserError("Action parameter value must be a string")
+        v = encode(s, param_info.bitwidth)
+        p = p4runtime_pb2.Action.Param()
+        p.param_id = param_info.id
+        p.value = make_canonical_if_option_set(v)
+        return p
+
+    def msg(self):
+        """
+        Create an action message.
+
+        :return: action message
+        """
+        msg = p4runtime_pb2.Action()
+        msg.action_id = self._action_id
+        msg.params.extend(self._param_values.values())
+        return msg
+
+    def _from_msg(self, msg):
+        assert self._action_id == msg.action_id
+        self._params.clear()
+        for p in msg.params:
+            p_name = CONTEXT.get_param_name(self.action_name, p.param_id)
+            self._param_values[p_name] = p
+
+    def __str__(self):
+        return str(self.msg())
+
+    def id(self):
+        """
+        Get action ID.
+
+        :return: action ID
+        """
+        return self._action_info.preamble.id
+
+    def alias(self):
+        """
+        Get action alias.
+
+        :return: action alias
+        """
+        return str(self._action_info.preamble.alias)
+
+    def set(self, **kwargs):
+        """
+        Set action parameters.
+
+        :param kwargs: parameters
+        :return: void
+        """
+        for name, value in kwargs.items():
+            self[name] = value
+
+
+class _EntityBase:
+    """
+    Basic entity.
+    """
+
+    def __init__(self, entity_type, p4runtime_cls, modify_only=False):
+        self._init = False
+        self._entity_type = entity_type
+        self._entry = p4runtime_cls()
+        self._modify_only = modify_only
+
+    def __dir__(self):
+        d = ["msg", "read"]
+        if self._modify_only:
+            d.append("modify")
+        else:
+            d.extend(["insert", "modify", "delete"])
+        return d
+
+    # to be called before issuing a P4Runtime request
+    # enforces checks that cannot be performed when setting individual fields
+    def _validate_msg(self):
+        return True
+
+    def _update_msg(self):
+        pass
+
+    def __getattr__(self, name):
+        raise AttributeError(f"'{self.__class__.__name__}' object "
+                             f"has no attribute '{name}'")
+
+    def msg(self):
+        """
+        Get a basic entity message.
+
+        :return: entity message
+        """
+        self._update_msg()
+        return self._entry
+
+    def _write(self, type_):
+        self._update_msg()
+        self._validate_msg()
+        update = p4runtime_pb2.Update()
+        update.type = type_
+        getattr(update.entity, self._entity_type.name).CopyFrom(self._entry)
+        CLIENT.write_update(update)
+
+    def insert(self):
+        """
+        Insert an entity.
+
+        :return: void
+        """
+        if self._modify_only:
+            raise NotImplementedError(
+                f"Insert not supported for {self._entity_type.name}")
+        logging.debug("Inserting entry")
+        self._write(p4runtime_pb2.Update.INSERT)
+
+    def delete(self):
+        """
+        Delete an entity.
+
+        :return: void
+        """
+        if self._modify_only:
+            raise NotImplementedError(
+                f"Delete not supported for {self._entity_type.name}")
+        logging.debug("Deleting entry")
+        self._write(p4runtime_pb2.Update.DELETE)
+
+    def modify(self):
+        """
+        Modify an entity.
+
+        :return: void
+        """
+        logging.debug("Modifying entry")
+        self._write(p4runtime_pb2.Update.MODIFY)
+
+    def _from_msg(self, msg):
+        raise NotImplementedError
+
+    def read(self, function=None):
+        """
+        Read an entity.
+
+        :param function: function to read (optional)
+        :return: retrieved entity
+        """
+        # Entities should override this method and provide a helpful docstring
+        self._update_msg()
+        self._validate_msg()
+        entity = p4runtime_pb2.Entity()
+        getattr(entity, self._entity_type.name).CopyFrom(self._entry)
+
+        iterator = CLIENT.read_one(entity)
+
+        # Cannot use a (simpler) generator here as we need to
+        # decorate __next__ with @parse_p4runtime_error.
+        class _EntryIterator:
+            def __init__(self, entity, it):
+                self._entity = entity
+                self._it = it
+                self._entities_it = None
+
+            def __iter__(self):
+                return self
+
+            @parse_p4runtime_error
+            def __next__(self):
+                if self._entities_it is None:
+                    rep = next(self._it)
+                    self._entities_it = iter(rep.entities)
+                try:
+                    entity = next(self._entities_it)
+                except StopIteration:
+                    self._entities_it = None
+                    return next(self)
+
+                if isinstance(self._entity, _P4EntityBase):
+                    ent = type(self._entity)(
+                        self._entity.name)  # create new instance of same entity
+                else:
+                    ent = type(self._entity)()
+                msg = getattr(entity, self._entity._entity_type.name)
+                ent._from_msg(msg)
+                # neither of these should be needed
+                # ent._update_msg()
+                # ent._entry.CopyFrom(msg)
+                return ent
+
+        if function is None:
+            return _EntryIterator(self, iterator)
+        for x in _EntryIterator(self, iterator):
+            function(x)
+
+
+class _P4EntityBase(_EntityBase):
+    """
+    Basic P4 entity.
+    """
+
+    def __init__(self, p4_type, entity_type, p4runtime_cls, name=None,
+                 modify_only=False):
+        super().__init__(entity_type, p4runtime_cls, modify_only)
+        self._p4_type = p4_type
+        if name is None:
+            raise UserError(
+                f"Please provide name for {p4_type.pretty_name}")
+        self.name = name
+        self._info = P4Objects(p4_type)[name]
+        self.id = self._info.id
+
+    def __dir__(self):
+        return super().__dir__() + ["name", "id", "info"]
+
+    def _from_msg(self, msg):
+        raise NotImplementedError
+
+    def info(self):
+        """
+        Display P4Info entry for the object.
+
+        :return: P4 info entry
+        """
+        return self._info
+
+
+class ActionProfileMember(_P4EntityBase):
+    """
+    P4 action profile member.
+    """
+
+    def __init__(self, action_profile_name=None):
+        super().__init__(
+            P4Type.action_profile, P4RuntimeEntity.action_profile_member,
+            p4runtime_pb2.ActionProfileMember, action_profile_name)
+        self.member_id = 0
+        self.action = None
+        self._valid_action_ids = self._get_action_set()
+        self.__doc__ = f"""
+An action profile member for '{action_profile_name}'
+
+Use <self>.info to display the P4Info entry for the action profile.
+
+Set the member id with <self>.member_id = <expr>.
+
+To set the action specification <self>.action = <instance of type Action>.
+To set the value of action parameters,
+use <self>.action['<param name>'] = <expr>.
+Type <self>.action? for more details.
+
+
+Typical usage to insert an action profile member:
+m = action_profile_member['<action_profile_name>'](action='<action_name>',
+member_id=1)
+m.action['<p1>'] = ...
+...
+m.action['<pM>'] = ...
+# OR m.action.set(p1=..., ..., pM=...)
+m.insert
+
+For information about how to read members, use <self>.read?
+"""
+        self._init = True
+
+    def __dir__(self):
+        return super().__dir__() + ["member_id", "action"]
+
+    def _get_action_set(self):
+        t_id = self._info.table_ids[0]
+        t_name = CONTEXT.get_name_from_id(t_id)
+        t = CONTEXT.get_table(t_name)
+        return {action.id for action in t.action_refs}
+
+    def __call__(self, **kwargs):
+        for name, value in kwargs.items():
+            if name == "action" and isinstance(value, str):
+                value = Action(value)
+            setattr(self, name, value)
+        return self
+
+    def __setattr__(self, name, value):
+        if name[0] == "_" or not self._init:
+            super().__setattr__(name, value)
+            return
+        if name == "name":
+            raise UserError("Cannot change action profile name")
+        if name == "member_id":
+            if not isinstance(value, int):
+                raise UserError("member_id must be an integer")
+        if name == "action" and value is not None:
+            if not isinstance(value, Action):
+                raise UserError("action must be an instance of Action")
+            if not self._is_valid_action_id(value._action_id):
+                raise UserError(f"action '{value.action_name}' is not a valid "
+                                f"action for this action profile")
+        super().__setattr__(name, value)
+
+    def _is_valid_action_id(self, action_id):
+        return action_id in self._valid_action_ids
+
+    def _update_msg(self):
+        self._entry.action_profile_id = self.id
+        self._entry.member_id = self.member_id
+        if self.action is not None:
+            self._entry.action.CopyFrom(self.action.msg())
+
+    def _from_msg(self, msg):
+        self.member_id = msg.member_id
+        if msg.HasField('action'):
+            action = msg.action
+            action_name = CONTEXT.get_name_from_id(action.action_id)
+            self.action = Action(action_name)
+            self.action._from_msg(action)
+
+    def read(self, function=None):
+        """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
+        the appropriate fields unset).
+
+        If function is None, returns an iterator. Iterate over it to get all the
+        members (as ActionProfileMember instances) returned by the
+        server. Otherwise, function is applied to all the members returned
+        by the server.
+        """
+        return super().read(function)
+
+
+class GroupMember:
+    """
+    P4 group member.
+
+    A member in an ActionProfileGroup.
+    Construct with GroupMember(<member_id>, weight=<weight>, watch=<watch>,
+    watch_port=<watch_port>).
+    You can set / get attributes member_id (required), weight (default 1),
+    watch (default 0), watch_port (default "").
+    """
+
+    def __init__(self, member_id=None, weight=1, watch=0, watch_port=b""):
+        if member_id is None:
+            raise UserError("member_id is required")
+        self._msg = p4runtime_pb2.ActionProfileGroup.Member()
+        self._msg.member_id = member_id
+        self._msg.weight = weight
+        if watch:
+            self._msg.watch = watch
+        if watch_port:
+            self._msg.watch_port = watch_port
+
+    def __dir__(self):
+        return ["member_id", "weight", "watch", "watch_port"]
+
+    def __setattr__(self, name, value):
+        if name[0] == "_":
+            super().__setattr__(name, value)
+            return
+        if name == "member_id":
+            if not isinstance(value, int):
+                raise UserError("member_id must be an integer")
+            self._msg.member_id = value
+            return
+        if name == "weight":
+            if not isinstance(value, int):
+                raise UserError("weight must be an integer")
+            self._msg.weight = value
+            return
+        if name == "watch":
+            if not isinstance(value, int):
+                raise UserError("watch must be an integer")
+            self._msg.watch = value
+            return
+        if name == "watch_port":
+            if not isinstance(value, bytes):
+                raise UserError("watch_port must be a byte string")
+            self._msg.watch_port = value
+            return
+        super().__setattr__(name, value)
+
+    def __getattr__(self, name):
+        if name == "member_id":
+            return self._msg.member_id
+        if name == "weight":
+            return self._msg.weight
+        if name == "watch":
+            return self._msg.watch
+        if name == "watch_port":
+            return self._msg.watch_port
+        return super().__getattr__(name)
+
+    def __str__(self):
+        return str(self._msg)
+
+
+class ActionProfileGroup(_P4EntityBase):
+    """
+    P4 action profile group.
+    """
+
+    def __init__(self, action_profile_name=None):
+        super().__init__(
+            P4Type.action_profile, P4RuntimeEntity.action_profile_group,
+            p4runtime_pb2.ActionProfileGroup, action_profile_name)
+        self.group_id = 0
+        self.max_size = 0
+        self.members = []
+        self.__doc__ = f"""
+An action profile group for '{action_profile_name}'
+
+Use <self>.info to display the P4Info entry for the action profile.
+
+Set the group id with <self>.group_id = <expr>. Default is 0.
+Set the max size with <self>.max_size = <expr>. Default is 0.
+
+Add members to the group with <self>.add(<member_id>, weight=<weight>, watch=<watch>,
+watch_port=<watch_port>).
+weight, watch and watch port are optional (default to 1, 0 and "" respectively).
+
+Typical usage to insert an action profile group:
+g = action_profile_group['<action_profile_name>'](group_id=1)
+g.add(<member id 1>)
+g.add(<member id 2>)
+# OR g.add(<member id 1>).add(<member id 2>)
+
+For information about how to read groups, use <self>.read?
+"""
+        self._init = True
+
+    def __dir__(self):
+        return super().__dir__() + ["group_id", "max_size", "members", "add",
+                                    "clear"]
+
+    def __call__(self, **kwargs):
+        for name, value in kwargs.items():
+            setattr(self, name, value)
+        return self
+
+    def __setattr__(self, name, value):
+        if name[0] == "_" or not self._init:
+            super().__setattr__(name, value)
+            return
+        if name == "name":
+            raise UserError("Cannot change action profile name")
+        if name == "group_id":
+            if not isinstance(value, int):
+                raise UserError("group_id must be an integer")
+        if name == "members":
+            if not isinstance(value, list):
+                raise UserError("members must be a list of GroupMember objects")
+            for member in value:
+                if not isinstance(member, GroupMember):
+                    raise UserError(
+                        "members must be a list of GroupMember objects")
+        super().__setattr__(name, value)
+
+    def add(self, member_id=None, weight=1, watch=0, watch_port=b""):
+        """Add a member to the members list."""
+        self.members.append(GroupMember(member_id, weight, watch, watch_port))
+        return self
+
+    def clear(self):
+        """Empty members list."""
+        self.members = []
+
+    def _update_msg(self):
+        self._entry.action_profile_id = self.id
+        self._entry.group_id = self.group_id
+        self._entry.max_size = self.max_size
+        del self._entry.members[:]
+        for member in self.members:
+            if not isinstance(member, GroupMember):
+                raise UserError("members must be a list of GroupMember objects")
+            m = self._entry.members.add()
+            m.CopyFrom(member._msg)
+
+    def _from_msg(self, msg):
+        self.group_id = msg.group_id
+        self.max_size = msg.max_size
+        self.members = []
+        for member in msg.members:
+            self.add(member.member_id, member.weight, member.watch,
+                     member.watch_port)
+
+    def read(self, function=None):
+        """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
+        the appropriate fields unset).
+
+        If function is None, returns an iterator. Iterate over it to get all the
+        members (as ActionProfileGroup instances) returned by the
+        server. Otherwise, function is applied to all the groups returned by the
+        server.
+        """
+        return super().read(function)
+
+
+def _get_action_profile(table_name):
+    table = CONTEXT.get_table(table_name)
+    implementation_id = table.implementation_id
+    if implementation_id == 0:
+        return None
+    try:
+        implementation_name = CONTEXT.get_name_from_id(implementation_id)
+    except KeyError as ex:
+        raise InvalidP4InfoError(
+            f"Invalid implementation_id {implementation_id} for "
+            f"table '{table_name}'") from ex
+    ap = CONTEXT.get_obj(P4Type.action_profile, implementation_name)
+    if ap is None:
+        raise InvalidP4InfoError(
+            f"Unknown implementation for table '{table_name}'")
+    return ap
+
+
+class OneshotAction:
+    """
+    A P4 action in a oneshot action set.
+    Construct with OneshotAction(<action (Action instance)>,
+    weight=<weight>, watch=<watch>, watch_port=<watch_port>).
+    You can set / get attributes action (required), weight (default 1),
+    watch (default 0), watch_port (default "").
+    """
+
+    def __init__(self, action=None, weight=1, watch=0, watch_port=b""):
+        if action is None:
+            raise UserError("action is required")
+        self.action = action
+        self.weight = weight
+        self.watch = watch
+        self.watch_port = watch_port
+
+    def __dir__(self):
+        return ["action", "weight", "watch", "watch_port", "msg"]
+
+    def __setattr__(self, name, value):
+        if name[0] == "_":
+            super().__setattr__(name, value)
+            return
+        if name == "action":
+            if not isinstance(value, Action):
+                raise UserError("action must be an instance of Action")
+        elif name == "weight":
+            if not isinstance(value, int):
+                raise UserError("weight must be an integer")
+        elif name == "watch":
+            if not isinstance(value, int):
+                raise UserError("watch must be an integer")
+        elif name == "watch_port":
+            print(type(value), value)
+            if not isinstance(value, bytes):
+                raise UserError("watch_port must be a byte string")
+        super().__setattr__(name, value)
+
+    def msg(self):
+        """
+        Create an one shot action message.
+
+        :return: one shot action message
+        """
+        msg = p4runtime_pb2.ActionProfileAction()
+        msg.action.CopyFrom(self.action.msg())
+        msg.weight = self.weight
+        if self.watch:
+            msg.watch = self.watch
+        if self.watch_port:
+            msg.watch_port = self.watch_port
+        return msg
+
+    def __str__(self):
+        return str(self.msg())
+
+
+class Oneshot:
+    """
+    One shot action set.
+    """
+
+    def __init__(self, table_name=None):
+        self._init = False
+        if table_name is None:
+            raise UserError("Please provide table name")
+        self.table_name = table_name
+        self.actions = []
+        self._table_info = P4Objects(P4Type.table)[table_name]
+        ap = _get_action_profile(table_name)
+        if not ap:
+            raise UserError("Cannot create Oneshot instance for a direct table")
+        if not ap.with_selector:
+            raise UserError(
+                "Cannot create Oneshot instance for a table "
+                "with an action profile without selector")
+        self.__doc__ = f"""
+A "oneshot" action set for table '{self.table_name}'.
+
+To add an action to the set, use <self>.add(<Action instance>).
+You can also access the set of actions with <self>.actions (which is a Python list).
+"""
+        self._init = True
+
+    def __dir__(self):
+        return ["table_name", "actions", "add", "msg"]
+
+    def __setattr__(self, name, value):
+        if name[0] == "_" or not self._init:
+            super().__setattr__(name, value)
+            return
+        if name == "table_name":
+            raise UserError("Cannot change table name")
+        if name == "actions":
+            if not isinstance(value, list):
+                raise UserError(
+                    "actions must be a list of OneshotAction objects")
+            for member in value:
+                if not isinstance(member, OneshotAction):
+                    raise UserError(
+                        "actions must be a list of OneshotAction objects")
+                if not self._is_valid_action_id(value.action._action_id):
+                    raise UserError(
+                        f"action '{value.action.action_name}' is not a valid "
+                        f"action for table {self.table_name}")
+        super().__setattr__(name, value)
+
+    def _is_valid_action_id(self, action_id):
+        for action_ref in self._table_info.action_refs:
+            if action_id == action_ref.id:
+                return True
+        return False
+
+    def add(self, action=None, weight=1, watch=0, watch_port=b""):
+        """
+        Add an action to the oneshot action set.
+
+        :param action: action object
+        :param weight: weight (integer)
+        :param watch: watch (integer)
+        :param watch_port: watch port
+        :return:
+        """
+        self.actions.append(OneshotAction(action, weight, watch, watch_port))
+        return self
+
+    def msg(self):
+        """
+        Create an action profile message.
+
+        :return: action profile message
+        """
+        msg = p4runtime_pb2.ActionProfileActionSet()
+        msg.action_profile_actions.extend(
+            [action.msg() for action in self.actions])
+        return msg
+
+    def _from_msg(self, msg):
+        for action in msg.action_profile_actions:
+            action_name = CONTEXT.get_name_from_id(action.action.action_id)
+            a = Action(action_name)
+            a._from_msg(action.action)
+            self.actions.append(OneshotAction(a, action.weight, action.watch,
+                                              action.watch_port))
+
+    def __str__(self):
+        return str(self.msg())
+
+
+class _CounterData:
+    """
+    P4 counter data.
+    """
+
+    @staticmethod
+    def attrs_for_counter_type(counter_type):
+        """
+        Return counter attributes.
+
+        :param counter_type: P4 counter type
+        :return: list of counter attributes
+        """
+        attrs = []
+        if counter_type in {p4info_pb2.CounterSpec.BYTES,
+                            p4info_pb2.CounterSpec.BOTH}:
+            attrs.append("byte_count")
+        if counter_type in {p4info_pb2.CounterSpec.PACKETS,
+                            p4info_pb2.CounterSpec.BOTH}:
+            attrs.append("packet_count")
+        return attrs
+
+    def __init__(self, counter_name, counter_type):
+        self._counter_name = counter_name
+        self._counter_type = counter_type
+        self._msg = p4runtime_pb2.CounterData()
+        self._attrs = _CounterData.attrs_for_counter_type(counter_type)
+
+    def __dir__(self):
+        return self._attrs
+
+    def __setattr__(self, name, value):
+        if name[0] == "_":
+            super().__setattr__(name, value)
+            return
+        if name not in self._attrs:
+            type_name = p4info_pb2._COUNTERSPEC_UNIT.values_by_number[
+                self._counter_type].name
+            raise UserError(
+                f"Counter '{self._counter_name}' is of type '{type_name}', "
+                f"you cannot set '{name}'")
+        if not isinstance(value, int):
+            raise UserError(f"{name} must be an integer")
+        setattr(self._msg, name, value)
+
+    def __getattr__(self, name):
+        if name in ("byte_count", "packet_count"):
+            return getattr(self._msg, name)
+        raise AttributeError(f"'{self.__class__.__name__}' object has no "
+                             f"attribute '{name}'")
+
+    def msg(self):
+        """
+        Create a counter data message.
+
+        :return: counter data message
+        """
+        return self._msg
+
+    def _from_msg(self, msg):
+        self._msg.CopyFrom(msg)
+
+    def __str__(self):
+        return str(self.msg())
+
+    @classmethod
+    def set_count(cls, instance, counter_name, counter_type, name, value):
+        """
+        Set the value of a certain counter.
+
+        :param instance: counter instance
+        :param counter_name: counter name
+        :param counter_type: counter type
+        :param name: counter attribute name
+        :param value: counter attribute value
+        :return: updated counter instance
+        """
+        if instance is None:
+            d = cls(counter_name, counter_type)
+        else:
+            d = instance
+        setattr(d, name, value)
+        return d
+
+    @classmethod
+    def get_count(cls, instance, counter_name, counter_type, name):
+        """
+        Get the value of a certain counter.
+
+        :param instance:
+        :param counter_name: counter name
+        :param counter_type: counter type
+        :param name: counter attribute name
+        :return: counter name and value
+        """
+        if instance is None:
+            d = cls(counter_name, counter_type)
+        else:
+            d = instance
+        r = getattr(d, name)
+        return d, r
+
+
+class _MeterConfig:
+    """
+    P4 meter configuration.
+    """
+
+    @staticmethod
+    def attrs():
+        """
+        Get the attributes in this scope.
+
+        :return: list of scope attributes
+        """
+        return ["cir", "cburst", "pir", "pburst"]
+
+    def __init__(self, meter_name, meter_type):
+        self._meter_name = meter_name
+        self._meter_type = meter_type
+        self._msg = p4runtime_pb2.MeterConfig()
+        self._attrs = _MeterConfig.attrs()
+
+    def __dir__(self):
+        return self._attrs
+
+    def __setattr__(self, name, value):
+        if name[0] == "_":
+            super().__setattr__(name, value)
+            return
+        if name in self._attrs:
+            if not isinstance(value, int):
+                raise UserError(f"{name} must be an integer")
+        setattr(self._msg, name, value)
+
+    def __getattr__(self, name):
+        if name in self._attrs:
+            return getattr(self._msg, name)
+        raise AttributeError(
+            f"'{self.__class__.__name__}' object has no attribute '{name}'")
+
+    def msg(self):
+        """
+        Create a meter config message.
+
+        :return: meter config message
+        """
+        return self._msg
+
+    def _from_msg(self, msg):
+        self._msg.CopyFrom(msg)
+
+    def __str__(self):
+        return str(self.msg())
+
+    @classmethod
+    def set_param(cls, instance, meter_name, meter_type, name, value):
+        """
+        Set the value of a certain meter parameter.
+
+        :param instance: meter instance
+        :param meter_name: meter name
+        :param meter_type: meter type
+        :param name: meter parameter name
+        :param value: meter parameter value
+        :return: updated meter
+        """
+        if instance is None:
+            d = cls(meter_name, meter_type)
+        else:
+            d = instance
+        setattr(d, name, value)
+        return d
+
+    @classmethod
+    def get_param(cls, instance, meter_name, meter_type, name):
+        """
+        Get the value of a certain meter parameter.
+
+        :param instance: meter instance
+        :param meter_name: meter name
+        :param meter_type: meter type
+        :param name: meter parameter name
+        :return: meter with parameter
+        """
+        if instance is None:
+            d = cls(meter_name, meter_type)
+        else:
+            d = instance
+        r = getattr(d, name)
+        return d, r
+
+
+class _IdleTimeout:
+    """
+    P4 idle timeout.
+    """
+
+    @staticmethod
+    def attrs():
+        """
+        Get the attributes in this scope.
+
+        :return: list of scope attributes
+        """
+        return ["elapsed_ns"]
+
+    def __init__(self):
+        self._msg = p4runtime_pb2.TableEntry.IdleTimeout()
+        self._attrs = _IdleTimeout.attrs()
+
+    def __dir__(self):
+        return self._attrs
+
+    def __setattr__(self, name, value):
+        if name[0] == "_":
+            super().__setattr__(name, value)
+            return
+        if name in self._attrs:
+            if not isinstance(value, int):
+                raise UserError(f"{name} must be an integer")
+        setattr(self._msg, name, value)
+
+    def __getattr__(self, name):
+        if name in self._attrs:
+            return getattr(self._msg, name)
+        raise AttributeError(
+            f"'{self.__class__.__name__}' object has no attribute '{name}'")
+
+    def msg(self):
+        """
+        Create an idle timeout message.
+
+        :return: idle timeout message
+        """
+        return self._msg
+
+    def _from_msg(self, msg):
+        self._msg.CopyFrom(msg)
+
+    def __str__(self):
+        return str(self.msg())
+
+    @classmethod
+    def set_param(cls, instance, name, value):
+        """
+        Set the value of a certain idle timeout parameter.
+
+        :param instance: idle timeout instance
+        :param name: idle timeout parameter name
+        :param value: idle timeout parameter value
+        :return: updated idle timeout instance
+        """
+        if instance is None:
+            d = cls()
+        else:
+            d = instance
+        setattr(d, name, value)
+        return d
+
+    @classmethod
+    def get_param(cls, instance, name):
+        """
+        Set the value of a certain idle timeout parameter.
+
+        :param instance: idle timeout instance
+        :param name: idle timeout parameter name
+        :return: idle timeout instance with parameter
+        """
+        if instance is None:
+            d = cls()
+        else:
+            d = instance
+        r = getattr(d, name)
+        return d, r
+
+
+class TableEntry(_P4EntityBase):
+    """
+    P4 table entry.
+    """
+
+    @enum.unique
+    class _ActionSpecType(enum.Enum):
+        NONE = 0
+        DIRECT_ACTION = 1
+        MEMBER_ID = 2
+        GROUP_ID = 3
+        ONESHOT = 4
+
+    @classmethod
+    def _action_spec_name_to_type(cls, name):
+        return {
+            "action": cls._ActionSpecType.DIRECT_ACTION,
+            "member_id": cls._ActionSpecType.MEMBER_ID,
+            "group_id": cls._ActionSpecType.GROUP_ID,
+            "oneshot": cls._ActionSpecType.ONESHOT,
+        }.get(name, None)
+
+    def __init__(self, table_name=None):
+        super().__init__(
+            P4Type.table, P4RuntimeEntity.table_entry,
+            p4runtime_pb2.TableEntry, table_name)
+        self.match = MatchKey(table_name, self._info.match_fields)
+        self._action_spec_type = self._ActionSpecType.NONE
+        self._action_spec = None
+        self.action: Action
+        self.member_id = -1
+        self.group_id = -1
+        self.oneshot = None
+        self.priority = 0
+        self.is_default = False
+        ap = _get_action_profile(table_name)
+        if ap is None:
+            self._support_members = False
+            self._support_groups = False
+        else:
+            self._support_members = True
+            self._support_groups = ap.with_selector
+        self._direct_counter = None
+        self._direct_meter = None
+        for res_id in self._info.direct_resource_ids:
+            prefix = (res_id & 0xff000000) >> 24
+            if prefix == p4info_pb2.P4Ids.DIRECT_COUNTER:
+                self._direct_counter = CONTEXT.get_obj_by_id(res_id)
+            elif prefix == p4info_pb2.P4Ids.DIRECT_METER:
+                self._direct_meter = CONTEXT.get_obj_by_id(res_id)
+        self._counter_data = None
+        self._meter_config = None
+        self.idle_timeout_ns = 0
+        self._time_since_last_hit = None
+        self._idle_timeout_behavior = None
+        table = CONTEXT.get_table(table_name)
+        if table.idle_timeout_behavior > 0:
+            self._idle_timeout_behavior = table.idle_timeout_behavior
+        self.metadata = b""
+        self.__doc__ = f"""
+An entry for table '{table_name}'
+
+Use <self>.info to display the P4Info entry for this table.
+
+To set the match key, use <self>.match['<field name>'] = <expr>.
+Type <self>.match? for more details.
+"""
+        if self._direct_counter is not None:
+            self.__doc__ += """
+To set the counter spec, use <self>.counter_data.byte_count and/or <self>.counter_data.packet_count.
+To unset it, use <self>.counter_data = None or <self>.clear_counter_data().
+"""
+        if self._direct_meter is not None:
+            self.__doc__ += """
+To access the meter config, use <self>.meter_config.<cir|cburst|pir|pburst>.
+To unset it, use <self>.meter_config = None or <self>.clear_meter_config().
+"""
+        if ap is None:
+            self.__doc__ += """
+To set the action specification (this is a direct table):
+<self>.action = <instance of type Action>.
+To set the value of action parameters, use <self>.action['<param name>'] = <expr>.
+Type <self>.action? for more details.
+"""
+        if self._support_members:
+            self.__doc__ += """
+Access the member_id with <self>.member_id.
+"""
+        if self._support_groups:
+            self.__doc__ += """
+Or access the group_id with <self>.group_id.
+"""
+        if self._idle_timeout_behavior is not None:
+            self.__doc__ += """
+To access the time this entry was last hit, use <self>.time_since_last_hit.elapsed_ns.
+To unset it, use <self>.time_since_last_hit = None or <self>.clear_time_since_last_hit().
+"""
+        self.__doc__ += """
+To set the priority, use <self>.priority = <expr>.
+
+To mark the entry as default, use <self>.is_default = True.
+
+To add an idle timeout to the entry, use <self>.idle_timeout_ns = <expr>.
+
+To add metadata to the entry, use <self>.metadata = <expr>.
+"""
+        if ap is None:
+            self.__doc__ += """
+Typical usage to insert a table entry:
+t = table_entry['<table_name>'](action='<action_name>')
+t.match['<f1>'] = ...
+...
+t.match['<fN>'] = ...
+# OR t.match.set(f1=..., ..., fN=...)
+t.action['<p1>'] = ...
+...
+t.action['<pM>'] = ...
+# OR t.action.set(p1=..., ..., pM=...)
+t.insert
+
+Typical usage to set the default entry:
+t = table_entry['<table_name>'](is_default=True)
+t.action['<p1>'] = ...
+...
+t.action['<pM>'] = ...
+# OR t.action.set(p1=..., ..., pM=...)
+t.modify
+"""
+        else:
+            self.__doc__ += """
+Typical usage to insert a table entry:
+t = table_entry['<table_name>']
+t.match['<f1>'] = ...
+...
+t.match['<fN>'] = ...
+# OR t.match.set(f1=..., ..., fN=...)
+t.member_id = <expr>
+"""
+        self.__doc__ += """
+For information about how to read table entries, use <self>.read?
+"""
+
+        self._init = True
+
+    def __dir__(self):
+        d = super().__dir__() + [
+            "match", "priority", "is_default", "idle_timeout_ns", "metadata",
+            "clear_action", "clear_match", "clear_counter_data",
+            "clear_meter_config",
+            "clear_time_since_last_hit"]
+        if self._support_groups:
+            d.extend(["member_id", "group_id", "oneshot"])
+        elif self._support_members:
+            d.append("member_id")
+        else:
+            d.append("action")
+        if self._direct_counter is not None:
+            d.append("counter_data")
+        if self._direct_meter is not None:
+            d.append("meter_config")
+        if self._idle_timeout_behavior is not None:
+            d.append("time_since_last_hit")
+        return d
+
+    def __call__(self, **kwargs):
+        for name, value in kwargs.items():
+            if name == "action" and isinstance(value, str):
+                value = Action(value)
+            setattr(self, name, value)
+        return self
+
+    def _action_spec_set_member(self, member_id):
+        if isinstance(member_id, type(None)):
+            if self._action_spec_type == self._ActionSpecType.MEMBER_ID:
+                super().__setattr__("_action_spec_type",
+                                    self._ActionSpecType.NONE)
+                super().__setattr__("_action_spec", None)
+            return
+        if not isinstance(member_id, int):
+            raise UserError("member_id must be an integer")
+        if not self._support_members:
+            raise UserError("Table does not have an action profile and "
+                            "therefore does not support members")
+        super().__setattr__("_action_spec_type", self._ActionSpecType.MEMBER_ID)
+        super().__setattr__("_action_spec", member_id)
+
+    def _action_spec_set_group(self, group_id):
+        if isinstance(group_id, type(None)):
+            if self._action_spec_type == self._ActionSpecType.GROUP_ID:
+                super().__setattr__("_action_spec_type",
+                                    self._ActionSpecType.NONE)
+                super().__setattr__("_action_spec", None)
+            return
+        if not isinstance(group_id, int):
+            raise UserError("group_id must be an integer")
+        if not self._support_groups:
+            raise UserError(
+                "Table does not have an action profile with selector "
+                "and therefore does not support groups")
+        super().__setattr__("_action_spec_type", self._ActionSpecType.GROUP_ID)
+        super().__setattr__("_action_spec", group_id)
+
+    def _action_spec_set_action(self, action):
+        if isinstance(action, type(None)):
+            if self._action_spec_type == self._ActionSpecType.DIRECT_ACTION:
+                super().__setattr__("_action_spec_type",
+                                    self._ActionSpecType.NONE)
+                super().__setattr__("_action_spec", None)
+            return
+        if not isinstance(action, Action):
+            raise UserError("action must be an instance of Action")
+        if self._info.implementation_id != 0:
+            raise UserError(
+                "Table has an implementation and therefore "
+                "does not support direct actions (P4Runtime 1.0 doesn't "
+                "support writing the default action for indirect tables")
+        if not self._is_valid_action_id(action._action_id):
+            raise UserError(f"action '{action.action_name}' is not a valid "
+                            f"action for this table")
+        super().__setattr__("_action_spec_type",
+                            self._ActionSpecType.DIRECT_ACTION)
+        super().__setattr__("_action_spec", action)
+
+    def _action_spec_set_oneshot(self, oneshot):
+        if isinstance(oneshot, type(None)):
+            if self._action_spec_type == self._ActionSpecType.ONESHOT:
+                super().__setattr__("_action_spec_type",
+                                    self._ActionSpecType.NONE)
+                super().__setattr__("_action_spec", None)
+            return
+        if not isinstance(oneshot, Oneshot):
+            raise UserError("oneshot must be an instance of Oneshot")
+        if not self._support_groups:
+            raise UserError(
+                "Table does not have an action profile with selector "
+                "and therefore does not support oneshot programming")
+        if self.name != oneshot.table_name:
+            raise UserError(
+                "This Oneshot instance was not created for this table")
+        super().__setattr__("_action_spec_type", self._ActionSpecType.ONESHOT)
+        super().__setattr__("_action_spec", oneshot)
+
+    def __setattr__(self, name, value):
+        if name[0] == "_" or not self._init:
+            super().__setattr__(name, value)
+            return
+        if name == "name":
+            raise UserError("Cannot change table name")
+        if name == "priority":
+            if not isinstance(value, int):
+                raise UserError("priority must be an integer")
+        if name == "match" and not isinstance(value, MatchKey):
+            raise UserError("match must be an instance of MatchKey")
+        if name == "is_default":
+            if not isinstance(value, bool):
+                raise UserError("is_default must be a boolean")
+            # TODO: handle other cases  # pylint: disable=W0511
+            # is_default is set to True)?
+            if value is True and self.match._count() > 0:
+                print("Clearing match key because entry is now default")
+                self.match.clear()
+        if name == "member_id":
+            self._action_spec_set_member(value)
+            return
+        if name == "group_id":
+            self._action_spec_set_group(value)
+            return
+        if name == "oneshot":
+            self._action_spec_set_oneshot(value)
+        if name == "action" and value is not None:
+            self._action_spec_set_action(value)
+            return
+        if name == "counter_data":
+            if self._direct_counter is None:
+                raise UserError("Table has no direct counter")
+            if value is None:
+                self._counter_data = None
+                return
+            raise UserError("Cannot set 'counter_data' directly")
+        if name == "meter_config":
+            if self._direct_meter is None:
+                raise UserError("Table has no direct meter")
+            if value is None:
+                self._meter_config = None
+                return
+            raise UserError("Cannot set 'meter_config' directly")
+        if name == "idle_timeout_ns":
+            if not isinstance(value, int):
+                raise UserError("idle_timeout_ns must be an integer")
+        if name == "time_since_last_hit":
+            if self._idle_timeout_behavior is None:
+                raise UserError("Table has no idle timeouts")
+            if value is None:
+                self._time_since_last_hit = None
+                return
+            raise UserError("Cannot set 'time_since_last_hit' directly")
+        if name == "metadata":
+            if not isinstance(value, bytes):
+                raise UserError("metadata must be a byte string")
+        super().__setattr__(name, value)
+
+    def __getattr__(self, name):
+        if name == "counter_data":
+            if self._direct_counter is None:
+                raise UserError("Table has no direct counter")
+            if self._counter_data is None:
+                self._counter_data = _CounterData(
+                    self._direct_counter.preamble.name,
+                    self._direct_counter.spec.unit)
+            return self._counter_data
+        if name == "meter_config":
+            if self._direct_meter is None:
+                raise UserError("Table has no direct meter")
+            if self._meter_config is None:
+                self._meter_config = _MeterConfig(
+                    self._direct_meter.preamble.name,
+                    self._direct_meter.spec.unit)
+            return self._meter_config
+        if name == "time_since_last_hit":
+            if self._idle_timeout_behavior is None:
+                raise UserError("Table has no idle timeouts")
+            if self._time_since_last_hit is None:
+                self._time_since_last_hit = _IdleTimeout()
+            return self._time_since_last_hit
+
+        t = self._action_spec_name_to_type(name)
+        if t is None:
+            return super().__getattr__(name)
+        if self._action_spec_type == t:
+            return self._action_spec
+        if t == self._ActionSpecType.ONESHOT:
+            self._action_spec_type = self._ActionSpecType.ONESHOT
+            self._action_spec = Oneshot(self.name)
+            return self._action_spec
+        return None
+
+    def _is_valid_action_id(self, action_id):
+        for action_ref in self._info.action_refs:
+            if action_id == action_ref.id:
+                return True
+        return False
+
+    def _from_msg(self, msg):
+        self.priority = msg.priority
+        self.is_default = msg.is_default_action
+        self.idle_timeout_ns = msg.idle_timeout_ns
+        self.metadata = msg.metadata
+        for mf in msg.match:
+            mf_name = CONTEXT.get_mf_name(self.name, mf.field_id)
+            self.match._mk[mf_name] = mf
+        if msg.action.HasField('action'):
+            action = msg.action.action
+            action_name = CONTEXT.get_name_from_id(action.action_id)
+            self.action = Action(action_name)
+            self.action._from_msg(action)
+        elif msg.action.HasField('action_profile_member_id'):
+            self.member_id = msg.action.action_profile_member_id
+        elif msg.action.HasField('action_profile_group_id'):
+            self.group_id = msg.action.action_profile_group_id
+        elif msg.action.HasField('action_profile_action_set'):
+            self.oneshot = Oneshot(self.name)
+            self.oneshot._from_msg(msg.action.action_profile_action_set)
+        if msg.HasField('counter_data'):
+            self._counter_data = _CounterData(
+                self._direct_counter.preamble.name,
+                self._direct_counter.spec.unit)
+            self._counter_data._from_msg(msg.counter_data)
+        else:
+            self._counter_data = None
+        if msg.HasField('meter_config'):
+            self._meter_config = _MeterConfig(
+                self._direct_meter.preamble.name, self._direct_meter.spec.unit)
+            self._meter_config._from_msg(msg.meter_config)
+        else:
+            self._meter_config = None
+        if msg.HasField("time_since_last_hit"):
+            self._time_since_last_hit = _IdleTimeout()
+            self._time_since_last_hit._from_msg(msg.time_since_last_hit)
+        else:
+            self._time_since_last_hit = None
+
+    def read(self, function=None):
+        """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
+        the appropriate fields unset).
+        If function is None, returns an iterator. Iterate over it to get all the
+        table entries (TableEntry instances) returned by the server. Otherwise,
+        function is applied to all the table entries returned by the server.
+
+        For example:
+        for te in <self>.read():
+            print(te)
+        The above code is equivalent to the following one-liner:
+        <self>.read(lambda te: print(te))
+
+        To delete all the entries from a table, simply use:
+        table_entry['<table_name>'].read(function=lambda x: x.delete())
+        """
+        return super().read(function)
+
+    def _update_msg(self):
+        entry = p4runtime_pb2.TableEntry()
+        entry.table_id = self.id
+        entry.match.extend(self.match._mk.values())
+        entry.priority = self.priority
+        entry.is_default_action = self.is_default
+        entry.idle_timeout_ns = self.idle_timeout_ns
+        entry.metadata = self.metadata
+        if self._action_spec_type == self._ActionSpecType.DIRECT_ACTION:
+            entry.action.action.CopyFrom(self._action_spec.msg())
+        elif self._action_spec_type == self._ActionSpecType.MEMBER_ID:
+            entry.action.action_profile_member_id = self._action_spec
+        elif self._action_spec_type == self._ActionSpecType.GROUP_ID:
+            entry.action.action_profile_group_id = self._action_spec
+        elif self._action_spec_type == self._ActionSpecType.ONESHOT:
+            entry.action.action_profile_action_set.CopyFrom(
+                self._action_spec.msg())
+        if self._counter_data is None:
+            entry.ClearField('counter_data')
+        else:
+            entry.counter_data.CopyFrom(self._counter_data.msg())
+        if self._meter_config is None:
+            entry.ClearField('meter_config')
+        else:
+            entry.meter_config.CopyFrom(self._meter_config.msg())
+        if self._time_since_last_hit is None:
+            entry.ClearField("time_since_last_hit")
+        else:
+            entry.time_since_last_hit.CopyFrom(self._time_since_last_hit.msg())
+        self._entry = entry
+
+    def _validate_msg(self):
+        if self.is_default and self.match._count() > 0:
+            raise UserError("Match key must be empty for default entry, "
+                            "use <self>.is_default = False "
+                            "or <self>.match.clear "
+                            "(whichever one is appropriate)")
+
+    def clear_action(self):
+        """Clears the action spec for the TableEntry."""
+        super().__setattr__("_action_spec_type", self._ActionSpecType.NONE)
+        super().__setattr__("_action_spec", None)
+
+    def clear_match(self):
+        """Clears the match spec for the TableEntry."""
+        self.match.clear()
+
+    def clear_counter_data(self):
+        """Clear all counter data, same as <self>.counter_data = None"""
+        self._counter_data = None
+
+    def clear_meter_config(self):
+        """Clear the meter config, same as <self>.meter_config = None"""
+        self._meter_config = None
+
+    def clear_time_since_last_hit(self):
+        """Clear the idle timeout, same as <self>.time_since_last_hit = None"""
+        self._time_since_last_hit = None
+
+
+class _CounterEntryBase(_P4EntityBase):
+    """
+    Basic P4 counter entry.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._counter_type = self._info.spec.unit
+        self.packet_count = -1
+        self.byte_count = -1
+        self._data = None
+
+    def __dir__(self):
+        return super().__dir__() + _CounterData.attrs_for_counter_type(
+            self._counter_type) + [
+                   "clear_data"]
+
+    def __call__(self, **kwargs):
+        for name, value in kwargs.items():
+            setattr(self, name, value)
+        return self
+
+    def __setattr__(self, name, value):
+        if name[0] == "_" or not self._init:
+            super().__setattr__(name, value)
+            return
+        if name == "name":
+            raise UserError("Cannot change counter name")
+        if name in ("byte_count", "packet_count"):
+            self._data = _CounterData.set_count(
+                self._data, self.name, self._counter_type, name, value)
+            return
+        if name == "data":
+            if value is None:
+                self._data = None
+                return
+            raise UserError("Cannot set 'data' directly")
+        super().__setattr__(name, value)
+
+    def __getattr__(self, name):
+        if name in ("byte_count", "packet_count"):
+            self._data, r = _CounterData.get_count(
+                self._data, self.name, self._counter_type, name)
+            return r
+        if name == "data":
+            if self._data is None:
+                self._data = _CounterData(self.name, self._counter_type)
+            return self._data
+        return super().__getattr__(name)
+
+    def _from_msg(self, msg):
+        self._entry.CopyFrom(msg)
+        if msg.HasField('data'):
+            self._data = _CounterData(self.name, self._counter_type)
+            self._data._from_msg(msg.data)
+        else:
+            self._data = None
+
+    def _update_msg(self):
+        if self._data is None:
+            self._entry.ClearField('data')
+        else:
+            self._entry.data.CopyFrom(self._data.msg())
+
+    def clear_data(self):
+        """Clear all counter data, same as <self>.data = None"""
+        self._data = None
+
+
+class CounterEntry(_CounterEntryBase):
+    """
+    P4 counter entry.
+    """
+
+    def __init__(self, counter_name=None):
+        super().__init__(
+            P4Type.counter, P4RuntimeEntity.counter_entry,
+            p4runtime_pb2.CounterEntry, counter_name,
+            modify_only=True)
+        self._entry.counter_id = self.id
+        self.index = -1
+        self.__doc__ = f"""
+An entry for counter '{counter_name}'
+
+Use <self>.info to display the P4Info entry for this counter.
+
+Set the index with <self>.index = <expr>.
+To reset it (e.g. for wildcard read), set it to None.
+
+Access byte count and packet count with <self>.byte_count / <self>.packet_count.
+
+To read from the counter, use <self>.read
+To write to the counter, use <self>.modify
+"""
+        self._init = True
+
+    def __dir__(self):
+        return super().__dir__() + ["index", "data"]
+
+    def __setattr__(self, name, value):
+        if name == "index":
+            if value is None:
+                self._entry.ClearField('index')
+                return
+            if not isinstance(value, int):
+                raise UserError("index must be an integer")
+            self._entry.index.index = value
+            return
+        super().__setattr__(name, value)
+
+    def __getattr__(self, name):
+        if name == "index":
+            return self._entry.index.index
+        return super().__getattr__(name)
+
+    def read(self, function=None):
+        """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
+        the index unset).
+        If function is None, returns an iterator. Iterate over it to get all the
+        counter entries (CounterEntry instances) returned by the
+        server. Otherwise, function is applied to all the counter entries
+        returned by the server.
+
+        For example:
+        for c in <self>.read():
+            print(c)
+        The above code is equivalent to the following one-liner:
+        <self>.read(lambda c: print(c))
+        """
+        return super().read(function)
+
+
+class DirectCounterEntry(_CounterEntryBase):
+    """
+    Direct P4 counter entry.
+    """
+
+    def __init__(self, direct_counter_name=None):
+        super().__init__(
+            P4Type.direct_counter, P4RuntimeEntity.direct_counter_entry,
+            p4runtime_pb2.DirectCounterEntry, direct_counter_name,
+            modify_only=True)
+        self._direct_table_id = self._info.direct_table_id
+        try:
+            self._direct_table_name = CONTEXT.get_name_from_id(
+                self._direct_table_id)
+        except KeyError as ex:
+            raise InvalidP4InfoError(f"direct_table_id {self._direct_table_id} "
+                                     f"is not a valid table id") from ex
+        self._table_entry = TableEntry(self._direct_table_name)
+        self.__doc__ = f"""
+An entry for direct counter '{direct_counter_name}'
+
+Use <self>.info to display the P4Info entry for this direct counter.
+
+Set the table_entry with <self>.table_entry = <TableEntry instance>.
+The TableEntry instance must be for the table to which the direct counter is
+attached.
+To reset it (e.g. for wildcard read), set it to None. It is the same as:
+<self>.table_entry = TableEntry({self._direct_table_name})
+
+Access byte count and packet count with <self>.byte_count / <self>.packet_count.
+
+To read from the counter, use <self>.read
+To write to the counter, use <self>.modify
+"""
+        self._init = True
+
+    def __dir__(self):
+        return super().__dir__() + ["table_entry"]
+
+    def __setattr__(self, name, value):
+        if name == "index":
+            raise UserError("Direct counters are not index-based")
+        if name == "table_entry":
+            if value is None:
+                self._table_entry = TableEntry(self._direct_table_name)
+                return
+            if not isinstance(value, TableEntry):
+                raise UserError("table_entry must be an instance of TableEntry")
+            if value.name != self._direct_table_name:
+                raise UserError(f"This DirectCounterEntry is for "
+                                f"table '{self._direct_table_name}'")
+            self._table_entry = value
+            return
+        super().__setattr__(name, value)
+
+    def __getattr__(self, name):
+        if name == "index":
+            raise UserError("Direct counters are not index-based")
+        if name == "table_entry":
+            return self._table_entry
+        return super().__getattr__(name)
+
+    def _update_msg(self):
+        super()._update_msg()
+        if self._table_entry is None:
+            self._entry.ClearField('table_entry')
+        else:
+            self._entry.table_entry.CopyFrom(self._table_entry.msg())
+
+    def _from_msg(self, msg):
+        super()._from_msg(msg)
+        if msg.HasField('table_entry'):
+            self._table_entry._from_msg(msg.table_entry)
+        else:
+            self._table_entry = None
+
+    def read(self, function=None):
+        """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
+        the index unset).
+        If function is None, returns an iterator. Iterate over it to get all the
+        direct counter entries (DirectCounterEntry instances) returned by the
+        server. Otherwise, function is applied to all the direct counter entries
+        returned by the server.
+
+        For example:
+        for c in <self>.read():
+            print(c)
+        The above code is equivalent to the following one-liner:
+        <self>.read(lambda c: print(c))
+        """
+        return super().read(function)
+
+
+class _MeterEntryBase(_P4EntityBase):
+    """
+    Basic P4 meter entry.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._meter_type = self._info.spec.unit
+        self.index = -1
+        self.cir = -1
+        self.cburst = -1
+        self.pir = -1
+        self.pburst = -1
+        self._config = None
+
+    def __dir__(self):
+        return super().__dir__() + _MeterConfig.attrs() + ["clear_config"]
+
+    def __call__(self, **kwargs):
+        for name, value in kwargs.items():
+            setattr(self, name, value)
+        return self
+
+    def __setattr__(self, name, value):
+        if name[0] == "_" or not self._init:
+            super().__setattr__(name, value)
+            return
+        if name == "name":
+            raise UserError("Cannot change meter name")
+        if name in _MeterConfig.attrs():
+            self._config = _MeterConfig.set_param(
+                self._config, self.name, self._meter_type, name, value)
+            return
+        if name == "config":
+            if value is None:
+                self._config = None
+                return
+            raise UserError("Cannot set 'config' directly")
+        super().__setattr__(name, value)
+
+    def __getattr__(self, name):
+        if name in _MeterConfig.attrs():
+            self._config, r = _MeterConfig.get_param(
+                self._config, self.name, self._meter_type, name)
+            return r
+        if name == "config":
+            if self._config is None:
+                self._config = _MeterConfig(self.name, self._meter_type)
+            return self._config
+        return super().__getattr__(name)
+
+    def _from_msg(self, msg):
+        self._entry.CopyFrom(msg)
+        if msg.HasField('config'):
+            self._config = _MeterConfig(self.name, self._meter_type)
+            self._config._from_msg(msg.config)
+        else:
+            self._config = None
+
+    def _update_msg(self):
+        if self._config is None:
+            self._entry.ClearField('config')
+        else:
+            self._entry.config.CopyFrom(self._config.msg())
+
+    def clear_config(self):
+        """Clear the meter config, same as <self>.config = None"""
+        self._config = None
+
+
+class MeterEntry(_MeterEntryBase):
+    """
+    P4 meter entry.
+    """
+
+    def __init__(self, meter_name=None):
+        super().__init__(
+            P4Type.meter, P4RuntimeEntity.meter_entry,
+            p4runtime_pb2.MeterEntry, meter_name,
+            modify_only=True)
+        self._entry.meter_id = self.id
+        self.__doc__ = f"""
+An entry for meter '{meter_name}'
+
+Use <self>.info to display the P4Info entry for this meter.
+
+Set the index with <self>.index = <expr>.
+To reset it (e.g. for wildcard read), set it to None.
+
+Access meter rates and burst sizes with:
+<self>.cir
+<self>.cburst
+<self>.pir
+<self>.pburst
+
+To read from the meter, use <self>.read
+To write to the meter, use <self>.modify
+"""
+        self._init = True
+
+    def __dir__(self):
+        return super().__dir__() + ["index", "config"]
+
+    def __setattr__(self, name, value):
+        if name == "index":
+            if value is None:
+                self._entry.ClearField('index')
+                return
+            if not isinstance(value, int):
+                raise UserError("index must be an integer")
+            self._entry.index.index = value
+            return
+        super().__setattr__(name, value)
+
+    def __getattr__(self, name):
+        if name == "index":
+            return self._entry.index.index
+        return super().__getattr__(name)
+
+    def read(self, function=None):
+        """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
+        the index unset).
+        If function is None, returns an iterator. Iterate over it to get all the
+        meter entries (MeterEntry instances) returned by the
+        server. Otherwise, function is applied to all the meter entries
+        returned by the server.
+
+        For example:
+        for c in <self>.read():
+            print(c)
+        The above code is equivalent to the following one-liner:
+        <self>.read(lambda c: print(c))
+        """
+        return super().read(function)
+
+
+class DirectMeterEntry(_MeterEntryBase):
+    """
+    Direct P4 meter entry.
+    """
+
+    def __init__(self, direct_meter_name=None):
+        super().__init__(
+            P4Type.direct_meter, P4RuntimeEntity.direct_meter_entry,
+            p4runtime_pb2.DirectMeterEntry, direct_meter_name,
+            modify_only=True)
+        self._direct_table_id = self._info.direct_table_id
+        try:
+            self._direct_table_name = CONTEXT.get_name_from_id(
+                self._direct_table_id)
+        except KeyError as ex:
+            raise InvalidP4InfoError(f"direct_table_id {self._direct_table_id} "
+                                     f"is not a valid table id") from ex
+        self._table_entry = TableEntry(self._direct_table_name)
+        self.__doc__ = f"""
+An entry for direct meter '{direct_meter_name}'
+
+Use <self>.info to display the P4Info entry for this direct meter.
+
+Set the table_entry with <self>.table_entry = <TableEntry instance>.
+The TableEntry instance must be for the table to which the direct meter is attached.
+To reset it (e.g. for wildcard read), set it to None. It is the same as:
+<self>.table_entry = TableEntry({self._direct_table_name})
+
+Access meter rates and burst sizes with:
+<self>.cir
+<self>.cburst
+<self>.pir
+<self>.pburst
+
+To read from the meter, use <self>.read
+To write to the meter, use <self>.modify
+"""
+        self._init = True
+
+    def __dir__(self):
+        return super().__dir__() + ["table_entry"]
+
+    def __setattr__(self, name, value):
+        if name == "index":
+            raise UserError("Direct meters are not index-based")
+        if name == "table_entry":
+            if value is None:
+                self._table_entry = TableEntry(self._direct_table_name)
+                return
+            if not isinstance(value, TableEntry):
+                raise UserError("table_entry must be an instance of TableEntry")
+            if value.name != self._direct_table_name:
+                raise UserError(f"This DirectMeterEntry is for "
+                                f"table '{self._direct_table_name}'")
+            self._table_entry = value
+            return
+        super().__setattr__(name, value)
+
+    def __getattr__(self, name):
+        if name == "index":
+            raise UserError("Direct meters are not index-based")
+        if name == "table_entry":
+            return self._table_entry
+        return super().__getattr__(name)
+
+    def _update_msg(self):
+        super()._update_msg()
+        if self._table_entry is None:
+            self._entry.ClearField('table_entry')
+        else:
+            self._entry.table_entry.CopyFrom(self._table_entry.msg())
+
+    def _from_msg(self, msg):
+        super()._from_msg(msg)
+        if msg.HasField('table_entry'):
+            self._table_entry._from_msg(msg.table_entry)
+        else:
+            self._table_entry = None
+
+    def read(self, function=None):
+        """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
+        the index unset).
+        If function is None, returns an iterator. Iterate over it to get all the
+        direct meter entries (DirectMeterEntry instances) returned by the
+        server. Otherwise, function is applied to all the direct meter entries
+        returned by the server.
+
+        For example:
+        for c in <self>.read():
+            print(c)
+        The above code is equivalent to the following one-liner:
+        <self>.read(lambda c: print(c))
+        """
+        return super().read(function)
+
+
+class P4RuntimeEntityBuilder:
+    """
+    P4 entity builder.
+    """
+
+    def __init__(self, obj_type, entity_type, entity_cls):
+        self._obj_type = obj_type
+        self._names = sorted([name for name, _ in CONTEXT.get_objs(obj_type)])
+        self._entity_type = entity_type
+        self._entity_cls = entity_cls
+        self.__doc__ = f"""Construct a {entity_cls.__name__} entity
+Usage: <var> = {entity_type.name}["<{obj_type.pretty_name} name>"]
+This is equivalent to <var>={entity_cls.__name__}(<{obj_type.pretty_name} name>)
+Use command '{obj_type.p4info_name}' to see list of {obj_type.pretty_names}
+"""
+
+    def _ipython_key_completions_(self):
+        return self._names
+
+    def __getitem__(self, name):
+        obj = CONTEXT.get_obj(self._obj_type, name)
+        if obj is None:
+            raise UserError(
+                f"{self._obj_type.pretty_name} '{name}' does not exist")
+        return self._entity_cls(name)
+
+    def __setitem__(self, name, value):
+        raise UserError("Operation not allowed")
+
+    def __str__(self):
+        return f"Construct a {self.entity_cls.__name__} entity"
+
+
+class Replica:
+    """
+    A port "replica" (port number + instance id) used for multicast
+    and clone session programming.
+    Construct with Replica(egress_port, instance=<instance>).
+    You can set / get attributes egress_port (required), instance (default 0).
+    """
+
+    def __init__(self, egress_port=None, instance=0):
+        if egress_port is None:
+            raise UserError("egress_port is required")
+        self._msg = p4runtime_pb2.Replica()
+        self._msg.egress_port = egress_port
+        self._msg.instance = instance
+
+    def __dir__(self):
+        return ["port", "egress_port", "instance"]
+
+    def __setattr__(self, name, value):
+        if name[0] == "_":
+            super().__setattr__(name, value)
+            return
+        if name in ("egress_port", "port"):
+            if not isinstance(value, int):
+                raise UserError("egress_port must be an integer")
+            self._msg.egress_port = value
+            return
+        if name == "instance":
+            if not isinstance(value, int):
+                raise UserError("instance must be an integer")
+            self._msg.instance = value
+            return
+        super().__setattr__(name, value)
+
+    def __getattr__(self, name):
+        if name in ("egress_port", "port"):
+            return self._msg.egress_port
+        if name == "instance":
+            return self._msg.instance
+        return super().__getattr__(name)
+
+    def __str__(self):
+        return str(self._msg)
+
+
+class MulticastGroupEntry(_EntityBase):
+    """
+    P4 multicast group entry.
+    """
+
+    def __init__(self, group_id=0):
+        super().__init__(
+            P4RuntimeEntity.packet_replication_engine_entry,
+            p4runtime_pb2.PacketReplicationEngineEntry)
+        self.group_id = group_id
+        self.replicas = []
+        self.__doc__ = """
+Multicast group entry.
+Create an instance with multicast_group_entry(<group_id>).
+Add replicas with <self>.add(<eg_port_1>, <instance_1>).add(<eg_port_2>, <instance_2>)...
+"""
+        self._init = True
+
+    def __dir__(self):
+        return ["group_id", "replicas"]
+
+    def __setattr__(self, name, value):
+        if name[0] == "_":
+            super().__setattr__(name, value)
+            return
+        if name == "group_id":
+            if not isinstance(value, int):
+                raise UserError("group_id must be an integer")
+        if name == "replicas":
+            if not isinstance(value, list):
+                raise UserError("replicas must be a list of Replica objects")
+            for r in value:
+                if not isinstance(r, Replica):
+                    raise UserError(
+                        "replicas must be a list of Replica objects")
+        super().__setattr__(name, value)
+
+    def _from_msg(self, msg):
+        self.group_id = msg.multicast_group_entry.multicast_group_id
+        for r in msg.multicast_group_entry.replicas:
+            self.add(r.egress_port, r.instance)
+
+    def read(self, function=None):
+        """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
+        the group_id as 0).
+        If function is None, returns an iterator. Iterate over it to get all the
+        multicast group entries (MulticastGroupEntry instances) returned by the
+        server. Otherwise, function is applied to all the multicast group entries
+        returned by the server.
+
+        For example:
+        for c in <self>.read():
+            print(c)
+        The above code is equivalent to the following one-liner:
+        <self>.read(lambda c: print(c))
+        """
+        return super().read(function)
+
+    def _update_msg(self):
+        entry = p4runtime_pb2.PacketReplicationEngineEntry()
+        mcg_entry = entry.multicast_group_entry
+        mcg_entry.multicast_group_id = self.group_id
+        for replica in self.replicas:
+            r = mcg_entry.replicas.add()
+            r.CopyFrom(replica._msg)
+        self._entry = entry
+
+    def add(self, egress_port=None, instance=0):
+        """Add a replica to the multicast group."""
+        self.replicas.append(Replica(egress_port, instance))
+        return self
+
+    def _write(self, type_):
+        if self.group_id == 0:
+            raise UserError("0 is not a valid group_id for MulticastGroupEntry")
+        super()._write(type_)
+
+
+class CloneSessionEntry(_EntityBase):
+    """
+    P4 clone session entry.
+    """
+
+    def __init__(self, session_id=0):
+        super().__init__(
+            P4RuntimeEntity.packet_replication_engine_entry,
+            p4runtime_pb2.PacketReplicationEngineEntry)
+        self.session_id = session_id
+        self.replicas = []
+        self.cos = 0
+        self.packet_length_bytes = 0
+        self.__doc__ = """
+Clone session entry.
+Create an instance with clone_session_entry(<session_id>).
+Add replicas with <self>.add(<eg_port_1>, <instance_1>).add(<eg_port_2>,
+<instance_2>)...
+Access class of service with <self>.cos.
+Access truncation length with <self>.packet_length_bytes.
+"""
+        self._init = True
+
+    def __dir__(self):
+        return ["session_id", "replicas", "cos", "packet_length_bytes"]
+
+    def __setattr__(self, name, value):
+        if name[0] == "_":
+            super().__setattr__(name, value)
+            return
+        if name == "session_id":
+            if not isinstance(value, int):
+                raise UserError("session_id must be an integer")
+        if name == "replicas":
+            if not isinstance(value, list):
+                raise UserError("replicas must be a list of Replica objects")
+            for r in value:
+                if not isinstance(r, Replica):
+                    raise UserError(
+                        "replicas must be a list of Replica objects")
+        if name == "cos":
+            if not isinstance(value, int):
+                raise UserError("cos must be an integer")
+        if name == "packet_length_bytes":
+            if not isinstance(value, int):
+                raise UserError("packet_length_bytes must be an integer")
+        super().__setattr__(name, value)
+
+    def _from_msg(self, msg):
+        self.session_id = msg.clone_session_entry.session_id
+        for r in msg.clone_session_entry.replicas:
+            self.add(r.egress_port, r.instance)
+        self.cos = msg.clone_session_entry.class_of_service
+        self.packet_length_bytes = msg.clone_session_entry.packet_length_bytes
+
+    def read(self, function=None):
+        """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
+        the session_id as 0).
+        If function is None, returns an iterator. Iterate over it to get all the
+        clone session entries (CloneSessionEntry instances) returned by the
+        server. Otherwise, function is applied to all the clone session entries
+        returned by the server.
+
+        For example:
+        for c in <self>.read():
+            print(c)
+        The above code is equivalent to the following one-liner:
+        <self>.read(lambda c: print(c))
+        """
+        return super().read(function)
+
+    def _update_msg(self):
+        entry = p4runtime_pb2.PacketReplicationEngineEntry()
+        cs_entry = entry.clone_session_entry
+        cs_entry.session_id = self.session_id
+        for replica in self.replicas:
+            r = cs_entry.replicas.add()
+            r.CopyFrom(replica._msg)
+        cs_entry.class_of_service = self.cos
+        cs_entry.packet_length_bytes = self.packet_length_bytes
+        self._entry = entry
+
+    def add(self, egress_port=None, instance=0):
+        """Add a replica to the clone session."""
+        self.replicas.append(Replica(egress_port, instance))
+        return self
+
+    def _write(self, type_):
+        if self.session_id == 0:
+            raise UserError("0 is not a valid group_id for CloneSessionEntry")
+        super()._write(type_)
+
+
+class PacketMetadata:
+    """
+    P4 packet metadata.
+    """
+
+    def __init__(self, metadata_info_list):
+        self._md_info = OrderedDict()
+        self._md = OrderedDict()
+        # Initialize every metadata to zero value
+        for md in metadata_info_list:
+            self._md_info[md.name] = md
+            self._md[md.name] = self._parse_md('0', md)
+        self._set_docstring()
+
+    def _set_docstring(self):
+        self.__doc__ = "Available metadata:\n\n"
+        for _, info in self._md_info.items():
+            self.__doc__ += str(info)
+        self.__doc__ += """
+Set a metadata value with <self>.['<metadata_name>'] = '...'
+
+You may also use <self>.set(<md_name>='<value>')
+"""
+
+    def __dir__(self):
+        return ["clear"]
+
+    def _get_md_info(self, name):
+        if name in self._md_info:
+            return self._md_info[name]
+        raise UserError(f"'{name}' is not a valid metadata name")
+
+    def __getitem__(self, name):
+        _ = self._get_md_info(name)
+        print(self._md.get(name, "Unset"))
+
+    def _parse_md(self, value, md_info):
+        if not isinstance(value, str):
+            raise UserError("Metadata value must be a string")
+        md = p4runtime_pb2.PacketMetadata()
+        md.metadata_id = md_info.id
+        md.value = encode(value.strip(), md_info.bitwidth)
+        return md
+
+    def __setitem__(self, name, value):
+        md_info = self._get_md_info(name)
+        self._md[name] = self._parse_md(value, md_info)
+
+    def _ipython_key_completions_(self):
+        return self._md_info.keys()
+
+    def set(self, **kwargs):
+        """
+        Set packet metadata parameters.
+
+        :param kwargs: packet metadata parameter map
+        :return: void
+        """
+        for name, value in kwargs.items():
+            self[name] = value
+
+    def clear(self):
+        """
+        Clear packet metadata.
+
+        :return: void
+        """
+        self._md.clear()
+
+    def values(self):
+        """
+        Get packet metadata values.
+
+        :return: list of packet metadata values
+        """
+        return self._md.values()
+
+
+class PacketIn():
+    """
+    P4 packet in.
+    """
+
+    def __init__(self):
+        ctrl_pkt_md = P4Objects(P4Type.controller_packet_metadata)
+        self.md_info_list = {}
+        if "packet_in" in ctrl_pkt_md:
+            self.p4_info = ctrl_pkt_md["packet_in"]
+            for md_info in self.p4_info.metadata:
+                self.md_info_list[md_info.name] = md_info
+        self.packet_in_queue = queue.Queue()
+
+        def _packet_in_recv_func(packet_in_queue):
+            while True:
+                msg = CLIENT.get_stream_packet("packet", timeout=None)
+                if not msg:
+                    break
+                packet_in_queue.put(msg)
+
+        self.recv_t = Thread(target=_packet_in_recv_func,
+                             args=(self.packet_in_queue,))
+        self.recv_t.start()
+
+    def sniff(self, function=None, timeout=None):
+        """
+        Return an iterator of packet-in messages.
+        If the function is provided, we do not return an iterator;
+        instead we apply the function to every packet-in message.
+
+        :param function: packet-in function
+        :param timeout: timeout in seconds
+        :return: list of packet-in messages
+        """
+        msgs = []
+
+        if timeout is not None and timeout < 0:
+            raise ValueError("Timeout can't be a negative number.")
+
+        if timeout is None:
+            while True:
+                try:
+                    msgs.append(self.packet_in_queue.get(block=True))
+                except KeyboardInterrupt:
+                    # User sends a Ctrl+C -> breaking
+                    break
+
+        else:  # timeout parameter is provided
+            deadline = time.time() + timeout
+            remaining_time = timeout
+            while remaining_time > 0:
+                try:
+                    msgs.append(self.packet_in_queue.get(block=True,
+                                                         timeout=remaining_time))
+                    remaining_time = deadline - time.time()
+                except KeyboardInterrupt:
+                    # User sends an interrupt(e.g., Ctrl+C).
+                    break
+                except queue.Empty:
+                    # No item available on timeout. Exiting
+                    break
+
+        if function is None:
+            return iter(msgs)
+        for msg in msgs:
+            function(msg)
+
+    def str(self):
+        """
+        Packet-in metadata to string.
+
+        :return: void
+        """
+        for name, info in self.md_info_list.itmes():
+            print(f"Packet-in metadata attribute '{name}':'{info}'")
+
+
+class PacketOut:
+    """
+    P4 packet out.
+    """
+
+    def __init__(self, payload=b'', **kwargs):
+
+        self.p4_info = P4Objects(P4Type.controller_packet_metadata)[
+            "packet_out"]
+        self._entry = None
+        self.payload = payload
+        self.metadata = PacketMetadata(self.p4_info.metadata)
+        if kwargs:
+            for key, value in kwargs.items():
+                self.metadata[key] = value
+
+    def _update_msg(self):
+        self._entry = p4runtime_pb2.PacketOut()
+        self._entry.payload = self.payload
+        self._entry.metadata.extend(self.metadata.values())
+
+    def __setattr__(self, name, value):
+        if name == "payload" and not isinstance(value, bytes):
+            raise UserError("payload must be a bytes type")
+        if name == "metadata" and not isinstance(value, PacketMetadata):
+            raise UserError("metadata must be a PacketMetadata type")
+        return super().__setattr__(name, value)
+
+    def __dir__(self):
+        return ["metadata", "send", "payload"]
+
+    def __str__(self):
+        self._update_msg()
+        return str(self._entry)
+
+    def send(self):
+        """
+        Send a packet-out message.
+
+        :return: void
+        """
+        self._update_msg()
+        msg = p4runtime_pb2.StreamMessageRequest()
+        msg.packet.CopyFrom(self._entry)
+        CLIENT.stream_out_q.put(msg)
+
+    def str(self):
+        """
+        Packet-out metadata to string.
+
+        :return: void
+        """
+        for key, value in self.metadata.itmes():
+            print(f"Packet-out metadata attribute '{key}':'{value}'")
+
+
+class IdleTimeoutNotification():
+    """
+    P4 idle timeout notification.
+    """
+
+    def __init__(self):
+        self.notification_queue = queue.Queue()
+
+        def _notification_recv_func(notification_queue):
+            while True:
+                msg = CLIENT.get_stream_packet("idle_timeout_notification",
+                                               timeout=None)
+                if not msg:
+                    break
+                notification_queue.put(msg)
+
+        self.recv_t = Thread(target=_notification_recv_func,
+                             args=(self.notification_queue,))
+        self.recv_t.start()
+
+    def sniff(self, function=None, timeout=None):
+        """
+        Return an iterator of notification messages.
+        If the function is provided, we do not return an iterator and instead we apply
+        the function to every notification message.
+        """
+        msgs = []
+
+        if timeout is not None and timeout < 0:
+            raise ValueError("Timeout can't be a negative number.")
+
+        if timeout is None:
+            while True:
+                try:
+                    msgs.append(self.notification_queue.get(block=True))
+                except KeyboardInterrupt:
+                    # User sends a Ctrl+C -> breaking
+                    break
+
+        else:  # timeout parameter is provided
+            deadline = time.time() + timeout
+            remaining_time = timeout
+            while remaining_time > 0:
+                try:
+                    msgs.append(self.notification_queue.get(block=True,
+                                                            timeout=remaining_time))
+                    remaining_time = deadline - time.time()
+                except KeyboardInterrupt:
+                    # User sends an interrupt(e.g., Ctrl+C).
+                    break
+                except queue.Empty:
+                    # No item available on timeout. Exiting
+                    break
+
+        if function is None:
+            return iter(msgs)
+        for msg in msgs:
+            function(msg)
diff --git a/src/device/service/drivers/p4/p4_util.py b/src/device/service/drivers/p4/p4_util.py
deleted file mode 100644
index b3d54499f56772768dc19bc1cae3bbf9a25e7dc2..0000000000000000000000000000000000000000
--- a/src/device/service/drivers/p4/p4_util.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-P4 driver utilities.
-"""
-
-import logging
-import queue
-import sys
-import threading
-from functools import wraps
-import grpc
-import google.protobuf.text_format
-from google.rpc import code_pb2
-
-from p4.v1 import p4runtime_pb2
-from p4.v1 import p4runtime_pb2_grpc
-
-P4_ATTR_DEV_ID = 'id'
-P4_ATTR_DEV_NAME = 'name'
-P4_ATTR_DEV_VENDOR = 'vendor'
-P4_ATTR_DEV_HW_VER = 'hw_ver'
-P4_ATTR_DEV_SW_VER = 'sw_ver'
-P4_ATTR_DEV_PIPECONF = 'pipeconf'
-
-P4_VAL_DEF_VENDOR = 'Unknown'
-P4_VAL_DEF_HW_VER = 'BMv2 simple_switch'
-P4_VAL_DEF_SW_VER = 'Stratum'
-P4_VAL_DEF_PIPECONF = 'org.onosproject.pipelines.fabric'
-
-STREAM_ATTR_ARBITRATION = 'arbitration'
-STREAM_ATTR_PACKET = 'packet'
-STREAM_ATTR_DIGEST = 'digest'
-STREAM_ATTR_UNKNOWN = 'unknown'
-
-LOGGER = logging.getLogger(__name__)
-
-
-class P4RuntimeException(Exception):
-    """
-    P4Runtime exception handler.
-
-    Attributes
-    ----------
-    grpc_error : object
-        gRPC error
-    """
-
-    def __init__(self, grpc_error):
-        super().__init__()
-        self.grpc_error = grpc_error
-
-    def __str__(self):
-        return str('P4Runtime RPC error (%s): %s',
-                   self.grpc_error.code().name(), self.grpc_error.details())
-
-
-def parse_p4runtime_error(fun):
-    """
-    Parse P4Runtime error.
-
-    :param fun: function
-    :return: parsed error
-    """
-    @wraps(fun)
-    def handle(*args, **kwargs):
-        try:
-            return fun(*args, **kwargs)
-        except grpc.RpcError as rpc_ex:
-            raise P4RuntimeException(rpc_ex) from None
-        except Exception as ex:
-            raise Exception(ex) from None
-    return handle
-
-
-class P4RuntimeClient:
-    """
-    P4Runtime client.
-
-    Attributes
-    ----------
-    device_id : int
-        P4 device ID
-    grpc_address : str
-        IP address and port
-    election_id : tuple
-        Mastership election ID
-    role_name : str
-        Role name (optional)
-    """
-    def __init__(self, device_id, grpc_address, election_id, role_name=None):
-        self.device_id = device_id
-        self.election_id = election_id
-        self.role_name = role_name
-        self.stream_in_q = None
-        self.stream_out_q = None
-        self.stream = None
-        self.stream_recv_thread = None
-        LOGGER.debug(
-            'Connecting to device %d at %s', device_id, grpc_address)
-        self.channel = grpc.insecure_channel(grpc_address)
-        self.stub = p4runtime_pb2_grpc.P4RuntimeStub(self.channel)
-        try:
-            self.set_up_stream()
-        except P4RuntimeException:
-            LOGGER.critical('Failed to connect to P4Runtime server')
-            sys.exit(1)
-
-    def set_up_stream(self):
-        """
-        Set up a gRPC stream.
-        """
-        self.stream_out_q = queue.Queue()
-        # queues for different messages
-        self.stream_in_q = {
-            STREAM_ATTR_ARBITRATION: queue.Queue(),
-            STREAM_ATTR_PACKET: queue.Queue(),
-            STREAM_ATTR_DIGEST: queue.Queue(),
-            STREAM_ATTR_UNKNOWN: queue.Queue(),
-        }
-
-        def stream_req_iterator():
-            while True:
-                st_p = self.stream_out_q.get()
-                if st_p is None:
-                    break
-                yield st_p
-
-        def stream_recv_wrapper(stream):
-            @parse_p4runtime_error
-            def stream_recv():
-                for st_p in stream:
-                    if st_p.HasField(STREAM_ATTR_ARBITRATION):
-                        self.stream_in_q[STREAM_ATTR_ARBITRATION].put(st_p)
-                    elif st_p.HasField(STREAM_ATTR_PACKET):
-                        self.stream_in_q[STREAM_ATTR_PACKET].put(st_p)
-                    elif st_p.HasField(STREAM_ATTR_DIGEST):
-                        self.stream_in_q[STREAM_ATTR_DIGEST].put(st_p)
-                    else:
-                        self.stream_in_q[STREAM_ATTR_UNKNOWN].put(st_p)
-            try:
-                stream_recv()
-            except P4RuntimeException as ex:
-                LOGGER.critical('StreamChannel error, closing stream')
-                LOGGER.critical(ex)
-                for k in self.stream_in_q:
-                    self.stream_in_q[k].put(None)
-        self.stream = self.stub.StreamChannel(stream_req_iterator())
-        self.stream_recv_thread = threading.Thread(
-            target=stream_recv_wrapper, args=(self.stream,))
-        self.stream_recv_thread.start()
-        self.handshake()
-
-    def handshake(self):
-        """
-        Handshake with gRPC server.
-        """
-
-        req = p4runtime_pb2.StreamMessageRequest()
-        arbitration = req.arbitration
-        arbitration.device_id = self.device_id
-        election_id = arbitration.election_id
-        election_id.high = self.election_id[0]
-        election_id.low = self.election_id[1]
-        if self.role_name is not None:
-            arbitration.role.name = self.role_name
-        self.stream_out_q.put(req)
-
-        rep = self.get_stream_packet(STREAM_ATTR_ARBITRATION, timeout=2)
-        if rep is None:
-            LOGGER.critical('Failed to establish session with server')
-            sys.exit(1)
-        is_primary = (rep.arbitration.status.code == code_pb2.OK)
-        LOGGER.debug('Session established, client is %s',
-                        'primary' if is_primary else 'backup')
-        if not is_primary:
-            LOGGER.warning(
-                'You are not the primary client, '
-                'you only have read access to the server')
-
-    def get_stream_packet(self, type_, timeout=1):
-        """
-        Get a new message from the stream.
-
-        :param type_: stream type.
-        :param timeout: time to wait.
-        :return: message or None
-        """
-        if type_ not in self.stream_in_q:
-            LOGGER.critical('Unknown stream type %s', type_)
-            return None
-        try:
-            msg = self.stream_in_q[type_].get(timeout=timeout)
-            return msg
-        except queue.Empty:  # timeout expired
-            return None
-
-    @parse_p4runtime_error
-    def get_p4info(self):
-        """
-        Retrieve P4Info content.
-
-        :return: P4Info object.
-        """
-
-        LOGGER.debug('Retrieving P4Info file')
-        req = p4runtime_pb2.GetForwardingPipelineConfigRequest()
-        req.device_id = self.device_id
-        req.response_type =\
-            p4runtime_pb2.GetForwardingPipelineConfigRequest.P4INFO_AND_COOKIE
-        rep = self.stub.GetForwardingPipelineConfig(req)
-        return rep.config.p4info
-
-    @parse_p4runtime_error
-    def set_fwd_pipe_config(self, p4info_path, bin_path):
-        """
-        Configure the pipeline.
-
-        :param p4info_path: path to the P4Info file
-        :param bin_path: path to the binary file
-        :return:
-        """
-
-        LOGGER.debug('Setting forwarding pipeline config')
-        req = p4runtime_pb2.SetForwardingPipelineConfigRequest()
-        req.device_id = self.device_id
-        if self.role_name is not None:
-            req.role = self.role_name
-        election_id = req.election_id
-        election_id.high = self.election_id[0]
-        election_id.low = self.election_id[1]
-        req.action =\
-            p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT
-        with open(p4info_path, 'r', encoding='utf8') as f_1:
-            with open(bin_path, 'rb', encoding='utf8') as f_2:
-                try:
-                    google.protobuf.text_format.Merge(
-                        f_1.read(), req.config.p4info)
-                except google.protobuf.text_format.ParseError:
-                    LOGGER.error('Error when parsing P4Info')
-                    raise
-                req.config.p4_device_config = f_2.read()
-        return self.stub.SetForwardingPipelineConfig(req)
-
-    def tear_down(self):
-        """
-        Tear connection with the gRPC server down.
-        """
-
-        if self.stream_out_q:
-            LOGGER.debug('Cleaning up stream')
-            self.stream_out_q.put(None)
-        if self.stream_in_q:
-            for k in self.stream_in_q:
-                self.stream_in_q[k].put(None)
-        if self.stream_recv_thread:
-            self.stream_recv_thread.join()
-        self.channel.close()
-        del self.channel
diff --git a/src/device/tests/Device_OpenConfig_Template.py b/src/device/tests/Device_OpenConfig_Template.py
index 6afa2721ff920c39de243b308b9b9a4749cb013b..af339cce40b60f8ea0e310613c951968f4fc9aeb 100644
--- a/src/device/tests/Device_OpenConfig_Template.py
+++ b/src/device/tests/Device_OpenConfig_Template.py
@@ -32,9 +32,11 @@ DEVICE_OC_CONNECT_RULES = json_device_connect_rules(DEVICE_OC_ADDRESS, DEVICE_OC
     'hostkey_verify' : True,
     'look_for_keys'  : True,
     'allow_agent'    : True,
+    'delete_rule'    : False,
     'device_params'  : {'name': 'default'},
     'manager_params' : {'timeout' : DEVICE_OC_TIMEOUT},
 })
 
+
 DEVICE_OC_CONFIG_RULES   = []           # populate your configuration rules to test
 DEVICE_OC_DECONFIG_RULES = []           # populate your deconfiguration rules to test
diff --git a/src/device/tests/device_p4.py b/src/device/tests/device_p4.py
index 4cd0a4c745d3a07b71f320ce79d73c95ffb0af37..ccc62c2195c8dae41a8e98b128b08965954d57f0 100644
--- a/src/device/tests/device_p4.py
+++ b/src/device/tests/device_p4.py
@@ -16,18 +16,23 @@
 P4 device example configuration.
 """
 
-from common.tools.object_factory.ConfigRule import json_config_rule_set
+import os
+from common.tools.object_factory.ConfigRule import (
+    json_config_rule_set, json_config_rule_delete)
 from common.tools.object_factory.Device import (
     json_device_connect_rules, json_device_id, json_device_p4_disabled)
 
-DEVICE_P4_DPID = 0
+CUR_PATH = os.path.dirname(os.path.abspath(__file__))
+
+DEVICE_P4_DPID = 1
 DEVICE_P4_NAME = 'device:leaf1'
-DEVICE_P4_ADDRESS = '127.0.0.1'
+DEVICE_P4_IP_ADDR = '127.0.0.1'
 DEVICE_P4_PORT = '50101'
 DEVICE_P4_VENDOR = 'Open Networking Foundation'
 DEVICE_P4_HW_VER = 'BMv2 simple_switch'
 DEVICE_P4_SW_VER = 'Stratum'
-DEVICE_P4_PIPECONF = 'org.onosproject.pipelines.fabric'
+DEVICE_P4_BIN_PATH = os.path.join(CUR_PATH, 'p4/test-bmv2.json')
+DEVICE_P4_INFO_PATH = os.path.join(CUR_PATH, 'p4/test-p4info.txt')
 DEVICE_P4_WORKERS = 2
 DEVICE_P4_GRACE_PERIOD = 60
 DEVICE_P4_TIMEOUT = 60
@@ -37,16 +42,52 @@ DEVICE_P4_ID = json_device_id(DEVICE_P4_UUID)
 DEVICE_P4 = json_device_p4_disabled(DEVICE_P4_UUID)
 
 DEVICE_P4_CONNECT_RULES = json_device_connect_rules(
-    DEVICE_P4_ADDRESS, DEVICE_P4_PORT, {
+    DEVICE_P4_IP_ADDR,
+    DEVICE_P4_PORT,
+    {
         'id': DEVICE_P4_DPID,
         'name': DEVICE_P4_NAME,
-        'hw-ver': DEVICE_P4_HW_VER,
-        'sw-ver': DEVICE_P4_SW_VER,
-        'pipeconf': DEVICE_P4_PIPECONF,
-        'timeout': DEVICE_P4_TIMEOUT
+        'vendor': DEVICE_P4_VENDOR,
+        'hw_ver': DEVICE_P4_HW_VER,
+        'sw_ver': DEVICE_P4_SW_VER,
+        'timeout': DEVICE_P4_TIMEOUT,
+        'p4bin': DEVICE_P4_BIN_PATH,
+        'p4info': DEVICE_P4_INFO_PATH
     }
 )
 
-DEVICE_P4_CONFIG_RULES = [
-    json_config_rule_set('key1', 'value1'),
+DEVICE_P4_CONFIG_TABLE_ENTRY = [
+    json_config_rule_set(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.acl_table',
+            'match-fields': [
+                {
+                    'match-field': 'hdr.ethernet.dst_addr',
+                    'match-value': 'aa:bb:cc:dd:ee:22 &&& ff:ff:ff:ff:ff:ff'
+                }
+            ],
+            'action-name': 'IngressPipeImpl.clone_to_cpu',
+            'action-params': [],
+            'priority': 1
+        }
+    )
+]
+
+DEVICE_P4_DECONFIG_TABLE_ENTRY = [
+    json_config_rule_delete(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.acl_table',
+            'match-fields': [
+                {
+                    'match-field': 'hdr.ethernet.dst_addr',
+                    'match-value': 'aa:bb:cc:dd:ee:22 &&& ff:ff:ff:ff:ff:ff'
+                }
+            ],
+            'action-name': 'IngressPipeImpl.clone_to_cpu',
+            'action-params': [],
+            'priority': 1
+        }
+    )
 ]
diff --git a/src/device/tests/device_report.xml b/src/device/tests/device_report.xml
deleted file mode 100644
index c05ea0ba79d2b1b6fb5434a76c2e6af022eb2e2c..0000000000000000000000000000000000000000
--- a/src/device/tests/device_report.xml
+++ /dev/null
@@ -1 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="pytest" errors="0" failures="0" skipped="0" tests="0" time="0.017" timestamp="2022-07-29T09:28:47.168633" hostname="86d45e18bd70" /></testsuites>
\ No newline at end of file
diff --git a/src/device/tests/mock_p4runtime_service.py b/src/device/tests/mock_p4runtime_service.py
index 77da0113676dc6f820d995b34915df6d0ba30f01..c1b2dcb45a18caf0c839f9bd8a68484bba5efbea 100644
--- a/src/device/tests/mock_p4runtime_service.py
+++ b/src/device/tests/mock_p4runtime_service.py
@@ -22,7 +22,7 @@ import grpc
 from p4.v1 import p4runtime_pb2_grpc
 
 from .device_p4 import(
-    DEVICE_P4_ADDRESS, DEVICE_P4_PORT,
+    DEVICE_P4_IP_ADDR, DEVICE_P4_PORT,
     DEVICE_P4_WORKERS, DEVICE_P4_GRACE_PERIOD)
 from .mock_p4runtime_servicer_impl import MockP4RuntimeServicerImpl
 
@@ -35,7 +35,7 @@ class MockP4RuntimeService:
     """
 
     def __init__(
-            self, address=DEVICE_P4_ADDRESS, port=DEVICE_P4_PORT,
+            self, address=DEVICE_P4_IP_ADDR, port=DEVICE_P4_PORT,
             max_workers=DEVICE_P4_WORKERS,
             grace_period=DEVICE_P4_GRACE_PERIOD):
         self.address = address
diff --git a/src/device/tests/mock_p4runtime_servicer_impl.py b/src/device/tests/mock_p4runtime_servicer_impl.py
index d29445da43afb58ef062f62c496b0780f92a4648..8a516303d9310be55662ef749175655c4069ae5c 100644
--- a/src/device/tests/mock_p4runtime_servicer_impl.py
+++ b/src/device/tests/mock_p4runtime_servicer_impl.py
@@ -22,11 +22,12 @@ from p4.v1 import p4runtime_pb2, p4runtime_pb2_grpc
 from p4.config.v1 import p4info_pb2
 
 try:
-    from p4_util import STREAM_ATTR_ARBITRATION, STREAM_ATTR_PACKET
+    from p4_client import STREAM_ATTR_ARBITRATION, STREAM_ATTR_PACKET
 except ImportError:
-    from device.service.drivers.p4.p4_util import STREAM_ATTR_ARBITRATION,\
+    from device.service.drivers.p4.p4_client import STREAM_ATTR_ARBITRATION,\
         STREAM_ATTR_PACKET
 
+
 class MockP4RuntimeServicerImpl(p4runtime_pb2_grpc.P4RuntimeServicer):
     """
     A P4Runtime service implementation for testing purposes.
diff --git a/src/device/tests/p4/test-bmv2.json b/src/device/tests/p4/test-bmv2.json
new file mode 100644
index 0000000000000000000000000000000000000000..f6ef6af34907ae00bcfa1034bb317a8f270b8995
--- /dev/null
+++ b/src/device/tests/p4/test-bmv2.json
@@ -0,0 +1,1910 @@
+{
+  "header_types" : [
+    {
+      "name" : "scalars_0",
+      "id" : 0,
+      "fields" : [
+        ["tmp_0", 1, false],
+        ["tmp_1", 1, false],
+        ["tmp", 1, false],
+        ["local_metadata_t.l4_src_port", 16, false],
+        ["local_metadata_t.l4_dst_port", 16, false],
+        ["local_metadata_t.is_multicast", 1, false],
+        ["local_metadata_t.next_srv6_sid", 128, false],
+        ["local_metadata_t.ip_proto", 8, false],
+        ["local_metadata_t.icmp_type", 8, false],
+        ["_padding_0", 4, false]
+      ]
+    },
+    {
+      "name" : "standard_metadata",
+      "id" : 1,
+      "fields" : [
+        ["ingress_port", 9, false],
+        ["egress_spec", 9, false],
+        ["egress_port", 9, false],
+        ["clone_spec", 32, false],
+        ["instance_type", 32, false],
+        ["drop", 1, false],
+        ["recirculate_port", 16, false],
+        ["packet_length", 32, false],
+        ["enq_timestamp", 32, false],
+        ["enq_qdepth", 19, false],
+        ["deq_timedelta", 32, false],
+        ["deq_qdepth", 19, false],
+        ["ingress_global_timestamp", 48, false],
+        ["egress_global_timestamp", 48, false],
+        ["lf_field_list", 32, false],
+        ["mcast_grp", 16, false],
+        ["resubmit_flag", 32, false],
+        ["egress_rid", 16, false],
+        ["recirculate_flag", 32, false],
+        ["checksum_error", 1, false],
+        ["parser_error", 32, false],
+        ["priority", 3, false],
+        ["_padding", 2, false]
+      ]
+    },
+    {
+      "name" : "cpu_out_header_t",
+      "id" : 2,
+      "fields" : [
+        ["egress_port", 9, false],
+        ["_pad", 7, false]
+      ]
+    },
+    {
+      "name" : "cpu_in_header_t",
+      "id" : 3,
+      "fields" : [
+        ["ingress_port", 9, false],
+        ["_pad", 7, false]
+      ]
+    },
+    {
+      "name" : "ethernet_t",
+      "id" : 4,
+      "fields" : [
+        ["dst_addr", 48, false],
+        ["src_addr", 48, false],
+        ["ether_type", 16, false]
+      ]
+    },
+    {
+      "name" : "ipv4_t",
+      "id" : 5,
+      "fields" : [
+        ["version", 4, false],
+        ["ihl", 4, false],
+        ["dscp", 6, false],
+        ["ecn", 2, false],
+        ["total_len", 16, false],
+        ["identification", 16, false],
+        ["flags", 3, false],
+        ["frag_offset", 13, false],
+        ["ttl", 8, false],
+        ["protocol", 8, false],
+        ["hdr_checksum", 16, false],
+        ["src_addr", 32, false],
+        ["dst_addr", 32, false]
+      ]
+    },
+    {
+      "name" : "ipv6_t",
+      "id" : 6,
+      "fields" : [
+        ["version", 4, false],
+        ["traffic_class", 8, false],
+        ["flow_label", 20, false],
+        ["payload_len", 16, false],
+        ["next_hdr", 8, false],
+        ["hop_limit", 8, false],
+        ["src_addr", 128, false],
+        ["dst_addr", 128, false]
+      ]
+    },
+    {
+      "name" : "srv6h_t",
+      "id" : 7,
+      "fields" : [
+        ["next_hdr", 8, false],
+        ["hdr_ext_len", 8, false],
+        ["routing_type", 8, false],
+        ["segment_left", 8, false],
+        ["last_entry", 8, false],
+        ["flags", 8, false],
+        ["tag", 16, false]
+      ]
+    },
+    {
+      "name" : "tcp_t",
+      "id" : 8,
+      "fields" : [
+        ["src_port", 16, false],
+        ["dst_port", 16, false],
+        ["seq_no", 32, false],
+        ["ack_no", 32, false],
+        ["data_offset", 4, false],
+        ["res", 3, false],
+        ["ecn", 3, false],
+        ["ctrl", 6, false],
+        ["window", 16, false],
+        ["checksum", 16, false],
+        ["urgent_ptr", 16, false]
+      ]
+    },
+    {
+      "name" : "udp_t",
+      "id" : 9,
+      "fields" : [
+        ["src_port", 16, false],
+        ["dst_port", 16, false],
+        ["len", 16, false],
+        ["checksum", 16, false]
+      ]
+    },
+    {
+      "name" : "icmp_t",
+      "id" : 10,
+      "fields" : [
+        ["type", 8, false],
+        ["icmp_code", 8, false],
+        ["checksum", 16, false],
+        ["identifier", 16, false],
+        ["sequence_number", 16, false],
+        ["timestamp", 64, false]
+      ]
+    },
+    {
+      "name" : "icmpv6_t",
+      "id" : 11,
+      "fields" : [
+        ["type", 8, false],
+        ["code", 8, false],
+        ["checksum", 16, false]
+      ]
+    },
+    {
+      "name" : "ndp_t",
+      "id" : 12,
+      "fields" : [
+        ["flags", 32, false],
+        ["target_ipv6_addr", 128, false],
+        ["type", 8, false],
+        ["length", 8, false],
+        ["target_mac_addr", 48, false]
+      ]
+    },
+    {
+      "name" : "srv6_list_t",
+      "id" : 13,
+      "fields" : [
+        ["segment_id", 128, false]
+      ]
+    }
+  ],
+  "headers" : [
+    {
+      "name" : "scalars",
+      "id" : 0,
+      "header_type" : "scalars_0",
+      "metadata" : true,
+      "pi_omit" : true
+    },
+    {
+      "name" : "standard_metadata",
+      "id" : 1,
+      "header_type" : "standard_metadata",
+      "metadata" : true,
+      "pi_omit" : true
+    },
+    {
+      "name" : "cpu_out",
+      "id" : 2,
+      "header_type" : "cpu_out_header_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "cpu_in",
+      "id" : 3,
+      "header_type" : "cpu_in_header_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "ethernet",
+      "id" : 4,
+      "header_type" : "ethernet_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "ipv4",
+      "id" : 5,
+      "header_type" : "ipv4_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "ipv6",
+      "id" : 6,
+      "header_type" : "ipv6_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "srv6h",
+      "id" : 7,
+      "header_type" : "srv6h_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "tcp",
+      "id" : 8,
+      "header_type" : "tcp_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "udp",
+      "id" : 9,
+      "header_type" : "udp_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "icmp",
+      "id" : 10,
+      "header_type" : "icmp_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "icmpv6",
+      "id" : 11,
+      "header_type" : "icmpv6_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "ndp",
+      "id" : 12,
+      "header_type" : "ndp_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "srv6_list[0]",
+      "id" : 13,
+      "header_type" : "srv6_list_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "srv6_list[1]",
+      "id" : 14,
+      "header_type" : "srv6_list_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "srv6_list[2]",
+      "id" : 15,
+      "header_type" : "srv6_list_t",
+      "metadata" : false,
+      "pi_omit" : true
+    },
+    {
+      "name" : "srv6_list[3]",
+      "id" : 16,
+      "header_type" : "srv6_list_t",
+      "metadata" : false,
+      "pi_omit" : true
+    }
+  ],
+  "header_stacks" : [
+    {
+      "name" : "srv6_list",
+      "id" : 0,
+      "header_type" : "srv6_list_t",
+      "size" : 4,
+      "header_ids" : [13, 14, 15, 16]
+    }
+  ],
+  "header_union_types" : [],
+  "header_unions" : [],
+  "header_union_stacks" : [],
+  "field_lists" : [
+    {
+      "id" : 1,
+      "name" : "fl",
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 474,
+        "column" : 34,
+        "source_fragment" : "{ standard_metadata.ingress_port }"
+      },
+      "elements" : [
+        {
+          "type" : "field",
+          "value" : ["standard_metadata", "ingress_port"]
+        }
+      ]
+    }
+  ],
+  "errors" : [
+    ["NoError", 1],
+    ["PacketTooShort", 2],
+    ["NoMatch", 3],
+    ["StackOutOfBounds", 4],
+    ["HeaderTooShort", 5],
+    ["ParserTimeout", 6],
+    ["ParserInvalidArgument", 7]
+  ],
+  "enums" : [],
+  "parsers" : [
+    {
+      "name" : "parser",
+      "id" : 0,
+      "init_state" : "start",
+      "parse_states" : [
+        {
+          "name" : "start",
+          "id" : 0,
+          "parser_ops" : [],
+          "transitions" : [
+            {
+              "type" : "hexstr",
+              "value" : "0x00ff",
+              "mask" : null,
+              "next_state" : "parse_packet_out"
+            },
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : "parse_ethernet"
+            }
+          ],
+          "transition_key" : [
+            {
+              "type" : "field",
+              "value" : ["standard_metadata", "ingress_port"]
+            }
+          ]
+        },
+        {
+          "name" : "parse_packet_out",
+          "id" : 1,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "cpu_out"
+                }
+              ],
+              "op" : "extract"
+            }
+          ],
+          "transitions" : [
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : "parse_ethernet"
+            }
+          ],
+          "transition_key" : []
+        },
+        {
+          "name" : "parse_ethernet",
+          "id" : 2,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "ethernet"
+                }
+              ],
+              "op" : "extract"
+            }
+          ],
+          "transitions" : [
+            {
+              "type" : "hexstr",
+              "value" : "0x0800",
+              "mask" : null,
+              "next_state" : "parse_ipv4"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x86dd",
+              "mask" : null,
+              "next_state" : "parse_ipv6"
+            },
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : null
+            }
+          ],
+          "transition_key" : [
+            {
+              "type" : "field",
+              "value" : ["ethernet", "ether_type"]
+            }
+          ]
+        },
+        {
+          "name" : "parse_ipv4",
+          "id" : 3,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "ipv4"
+                }
+              ],
+              "op" : "extract"
+            },
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "local_metadata_t.ip_proto"]
+                },
+                {
+                  "type" : "field",
+                  "value" : ["ipv4", "protocol"]
+                }
+              ],
+              "op" : "set"
+            }
+          ],
+          "transitions" : [
+            {
+              "type" : "hexstr",
+              "value" : "0x06",
+              "mask" : null,
+              "next_state" : "parse_tcp"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x11",
+              "mask" : null,
+              "next_state" : "parse_udp"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x01",
+              "mask" : null,
+              "next_state" : "parse_icmp"
+            },
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : null
+            }
+          ],
+          "transition_key" : [
+            {
+              "type" : "field",
+              "value" : ["ipv4", "protocol"]
+            }
+          ]
+        },
+        {
+          "name" : "parse_ipv6",
+          "id" : 4,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "ipv6"
+                }
+              ],
+              "op" : "extract"
+            },
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "local_metadata_t.ip_proto"]
+                },
+                {
+                  "type" : "field",
+                  "value" : ["ipv6", "next_hdr"]
+                }
+              ],
+              "op" : "set"
+            }
+          ],
+          "transitions" : [
+            {
+              "type" : "hexstr",
+              "value" : "0x06",
+              "mask" : null,
+              "next_state" : "parse_tcp"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x11",
+              "mask" : null,
+              "next_state" : "parse_udp"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x3a",
+              "mask" : null,
+              "next_state" : "parse_icmpv6"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x2b",
+              "mask" : null,
+              "next_state" : "parse_srv6"
+            },
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : null
+            }
+          ],
+          "transition_key" : [
+            {
+              "type" : "field",
+              "value" : ["ipv6", "next_hdr"]
+            }
+          ]
+        },
+        {
+          "name" : "parse_tcp",
+          "id" : 5,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "tcp"
+                }
+              ],
+              "op" : "extract"
+            },
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "local_metadata_t.l4_src_port"]
+                },
+                {
+                  "type" : "field",
+                  "value" : ["tcp", "src_port"]
+                }
+              ],
+              "op" : "set"
+            },
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "local_metadata_t.l4_dst_port"]
+                },
+                {
+                  "type" : "field",
+                  "value" : ["tcp", "dst_port"]
+                }
+              ],
+              "op" : "set"
+            }
+          ],
+          "transitions" : [
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : null
+            }
+          ],
+          "transition_key" : []
+        },
+        {
+          "name" : "parse_udp",
+          "id" : 6,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "udp"
+                }
+              ],
+              "op" : "extract"
+            },
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "local_metadata_t.l4_src_port"]
+                },
+                {
+                  "type" : "field",
+                  "value" : ["udp", "src_port"]
+                }
+              ],
+              "op" : "set"
+            },
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "local_metadata_t.l4_dst_port"]
+                },
+                {
+                  "type" : "field",
+                  "value" : ["udp", "dst_port"]
+                }
+              ],
+              "op" : "set"
+            }
+          ],
+          "transitions" : [
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : null
+            }
+          ],
+          "transition_key" : []
+        },
+        {
+          "name" : "parse_icmp",
+          "id" : 7,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "icmp"
+                }
+              ],
+              "op" : "extract"
+            },
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "local_metadata_t.icmp_type"]
+                },
+                {
+                  "type" : "field",
+                  "value" : ["icmp", "type"]
+                }
+              ],
+              "op" : "set"
+            }
+          ],
+          "transitions" : [
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : null
+            }
+          ],
+          "transition_key" : []
+        },
+        {
+          "name" : "parse_icmpv6",
+          "id" : 8,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "icmpv6"
+                }
+              ],
+              "op" : "extract"
+            },
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "local_metadata_t.icmp_type"]
+                },
+                {
+                  "type" : "field",
+                  "value" : ["icmpv6", "type"]
+                }
+              ],
+              "op" : "set"
+            }
+          ],
+          "transitions" : [
+            {
+              "type" : "hexstr",
+              "value" : "0x87",
+              "mask" : null,
+              "next_state" : "parse_ndp"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x88",
+              "mask" : null,
+              "next_state" : "parse_ndp"
+            },
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : null
+            }
+          ],
+          "transition_key" : [
+            {
+              "type" : "field",
+              "value" : ["icmpv6", "type"]
+            }
+          ]
+        },
+        {
+          "name" : "parse_ndp",
+          "id" : 9,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "ndp"
+                }
+              ],
+              "op" : "extract"
+            }
+          ],
+          "transitions" : [
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : null
+            }
+          ],
+          "transition_key" : []
+        },
+        {
+          "name" : "parse_srv6",
+          "id" : 10,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "srv6h"
+                }
+              ],
+              "op" : "extract"
+            }
+          ],
+          "transitions" : [
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : "parse_srv6_list"
+            }
+          ],
+          "transition_key" : []
+        },
+        {
+          "name" : "parse_srv6_list",
+          "id" : 11,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "stack",
+                  "value" : "srv6_list"
+                }
+              ],
+              "op" : "extract"
+            },
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "tmp_0"]
+                },
+                {
+                  "type" : "expression",
+                  "value" : {
+                    "type" : "expression",
+                    "value" : {
+                      "op" : "?",
+                      "left" : {
+                        "type" : "hexstr",
+                        "value" : "0x01"
+                      },
+                      "right" : {
+                        "type" : "hexstr",
+                        "value" : "0x00"
+                      },
+                      "cond" : {
+                        "type" : "expression",
+                        "value" : {
+                          "op" : "==",
+                          "left" : {
+                            "type" : "expression",
+                            "value" : {
+                              "op" : "&",
+                              "left" : {
+                                "type" : "expression",
+                                "value" : {
+                                  "op" : "+",
+                                  "left" : {
+                                    "type" : "expression",
+                                    "value" : {
+                                      "op" : "&",
+                                      "left" : {
+                                        "type" : "field",
+                                        "value" : ["srv6h", "segment_left"]
+                                      },
+                                      "right" : {
+                                        "type" : "hexstr",
+                                        "value" : "0xffffffff"
+                                      }
+                                    }
+                                  },
+                                  "right" : {
+                                    "type" : "hexstr",
+                                    "value" : "0xffffffff"
+                                  }
+                                }
+                              },
+                              "right" : {
+                                "type" : "hexstr",
+                                "value" : "0xffffffff"
+                              }
+                            }
+                          },
+                          "right" : {
+                            "type" : "expression",
+                            "value" : {
+                              "op" : "last_stack_index",
+                              "left" : null,
+                              "right" : {
+                                "type" : "header_stack",
+                                "value" : "srv6_list"
+                              }
+                            }
+                          }
+                        }
+                      }
+                    }
+                  }
+                }
+              ],
+              "op" : "set"
+            }
+          ],
+          "transitions" : [
+            {
+              "type" : "hexstr",
+              "value" : "0x01",
+              "mask" : null,
+              "next_state" : "mark_current_srv6"
+            },
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : "check_last_srv6"
+            }
+          ],
+          "transition_key" : [
+            {
+              "type" : "field",
+              "value" : ["scalars", "tmp_0"]
+            }
+          ]
+        },
+        {
+          "name" : "mark_current_srv6",
+          "id" : 12,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "local_metadata_t.next_srv6_sid"]
+                },
+                {
+                  "type" : "expression",
+                  "value" : {
+                    "type" : "stack_field",
+                    "value" : ["srv6_list", "segment_id"]
+                  }
+                }
+              ],
+              "op" : "set"
+            }
+          ],
+          "transitions" : [
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : "check_last_srv6"
+            }
+          ],
+          "transition_key" : []
+        },
+        {
+          "name" : "check_last_srv6",
+          "id" : 13,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "field",
+                  "value" : ["scalars", "tmp_1"]
+                },
+                {
+                  "type" : "expression",
+                  "value" : {
+                    "type" : "expression",
+                    "value" : {
+                      "op" : "?",
+                      "left" : {
+                        "type" : "hexstr",
+                        "value" : "0x01"
+                      },
+                      "right" : {
+                        "type" : "hexstr",
+                        "value" : "0x00"
+                      },
+                      "cond" : {
+                        "type" : "expression",
+                        "value" : {
+                          "op" : "==",
+                          "left" : {
+                            "type" : "expression",
+                            "value" : {
+                              "op" : "&",
+                              "left" : {
+                                "type" : "field",
+                                "value" : ["srv6h", "last_entry"]
+                              },
+                              "right" : {
+                                "type" : "hexstr",
+                                "value" : "0xffffffff"
+                              }
+                            }
+                          },
+                          "right" : {
+                            "type" : "expression",
+                            "value" : {
+                              "op" : "last_stack_index",
+                              "left" : null,
+                              "right" : {
+                                "type" : "header_stack",
+                                "value" : "srv6_list"
+                              }
+                            }
+                          }
+                        }
+                      }
+                    }
+                  }
+                }
+              ],
+              "op" : "set"
+            }
+          ],
+          "transitions" : [
+            {
+              "type" : "hexstr",
+              "value" : "0x01",
+              "mask" : null,
+              "next_state" : "parse_srv6_next_hdr"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x00",
+              "mask" : null,
+              "next_state" : "parse_srv6_list"
+            }
+          ],
+          "transition_key" : [
+            {
+              "type" : "field",
+              "value" : ["scalars", "tmp_1"]
+            }
+          ]
+        },
+        {
+          "name" : "parse_srv6_next_hdr",
+          "id" : 14,
+          "parser_ops" : [],
+          "transitions" : [
+            {
+              "type" : "hexstr",
+              "value" : "0x06",
+              "mask" : null,
+              "next_state" : "parse_tcp"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x11",
+              "mask" : null,
+              "next_state" : "parse_udp"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x3a",
+              "mask" : null,
+              "next_state" : "parse_icmpv6"
+            },
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : null
+            }
+          ],
+          "transition_key" : [
+            {
+              "type" : "field",
+              "value" : ["srv6h", "next_hdr"]
+            }
+          ]
+        }
+      ]
+    }
+  ],
+  "parse_vsets" : [],
+  "deparsers" : [
+    {
+      "name" : "deparser",
+      "id" : 0,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 602,
+        "column" : 8,
+        "source_fragment" : "DeparserImpl"
+      },
+      "order" : ["cpu_in", "ethernet", "ipv4", "ipv6", "srv6h", "srv6_list[0]", "srv6_list[1]", "srv6_list[2]", "srv6_list[3]", "tcp", "udp", "icmp", "icmpv6", "ndp"]
+    }
+  ],
+  "meter_arrays" : [],
+  "counter_arrays" : [
+    {
+      "name" : "l2_exact_table_counter",
+      "id" : 0,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 399,
+        "column" : 8,
+        "source_fragment" : "counters"
+      },
+      "is_direct" : true,
+      "binding" : "IngressPipeImpl.l2_exact_table"
+    },
+    {
+      "name" : "l2_ternary_table_counter",
+      "id" : 1,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 423,
+        "column" : 8,
+        "source_fragment" : "counters"
+      },
+      "is_direct" : true,
+      "binding" : "IngressPipeImpl.l2_ternary_table"
+    },
+    {
+      "name" : "acl_table_counter",
+      "id" : 2,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 494,
+        "column" : 8,
+        "source_fragment" : "counters"
+      },
+      "is_direct" : true,
+      "binding" : "IngressPipeImpl.acl_table"
+    }
+  ],
+  "register_arrays" : [],
+  "calculations" : [
+    {
+      "name" : "calc",
+      "id" : 0,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 580,
+        "column" : 8,
+        "source_fragment" : "update_checksum(hdr.ndp.isValid(), ..."
+      },
+      "algo" : "csum16",
+      "input" : [
+        {
+          "type" : "field",
+          "value" : ["ipv6", "src_addr"]
+        },
+        {
+          "type" : "field",
+          "value" : ["ipv6", "dst_addr"]
+        },
+        {
+          "type" : "field",
+          "value" : ["ipv6", "payload_len"]
+        },
+        {
+          "type" : "hexstr",
+          "value" : "0x00",
+          "bitwidth" : 8
+        },
+        {
+          "type" : "field",
+          "value" : ["ipv6", "next_hdr"]
+        },
+        {
+          "type" : "field",
+          "value" : ["icmpv6", "type"]
+        },
+        {
+          "type" : "field",
+          "value" : ["icmpv6", "code"]
+        },
+        {
+          "type" : "field",
+          "value" : ["ndp", "flags"]
+        },
+        {
+          "type" : "field",
+          "value" : ["ndp", "target_ipv6_addr"]
+        },
+        {
+          "type" : "field",
+          "value" : ["ndp", "type"]
+        },
+        {
+          "type" : "field",
+          "value" : ["ndp", "length"]
+        },
+        {
+          "type" : "field",
+          "value" : ["ndp", "target_mac_addr"]
+        }
+      ]
+    }
+  ],
+  "learn_lists" : [],
+  "actions" : [
+    {
+      "name" : "NoAction",
+      "id" : 0,
+      "runtime_data" : [],
+      "primitives" : []
+    },
+    {
+      "name" : "IngressPipeImpl.drop",
+      "id" : 1,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "mark_to_drop",
+          "parameters" : [
+            {
+              "type" : "header",
+              "value" : "standard_metadata"
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 351,
+            "column" : 8,
+            "source_fragment" : "mark_to_drop(standard_metadata)"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.drop",
+      "id" : 2,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "mark_to_drop",
+          "parameters" : [
+            {
+              "type" : "header",
+              "value" : "standard_metadata"
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 351,
+            "column" : 8,
+            "source_fragment" : "mark_to_drop(standard_metadata)"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.drop",
+      "id" : 3,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "mark_to_drop",
+          "parameters" : [
+            {
+              "type" : "header",
+              "value" : "standard_metadata"
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 351,
+            "column" : 8,
+            "source_fragment" : "mark_to_drop(standard_metadata)"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.set_egress_port",
+      "id" : 4,
+      "runtime_data" : [
+        {
+          "name" : "port_num",
+          "bitwidth" : 9
+        }
+      ],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["standard_metadata", "egress_spec"]
+            },
+            {
+              "type" : "runtime_data",
+              "value" : 0
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 383,
+            "column" : 8,
+            "source_fragment" : "standard_metadata.egress_spec = port_num"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.set_multicast_group",
+      "id" : 5,
+      "runtime_data" : [
+        {
+          "name" : "gid",
+          "bitwidth" : 16
+        }
+      ],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["standard_metadata", "mcast_grp"]
+            },
+            {
+              "type" : "runtime_data",
+              "value" : 0
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 409,
+            "column" : 8,
+            "source_fragment" : "standard_metadata.mcast_grp = gid"
+          }
+        },
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["scalars", "local_metadata_t.is_multicast"]
+            },
+            {
+              "type" : "expression",
+              "value" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "b2d",
+                  "left" : null,
+                  "right" : {
+                    "type" : "bool",
+                    "value" : true
+                  }
+                }
+              }
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 410,
+            "column" : 8,
+            "source_fragment" : "local_metadata.is_multicast = true"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.send_to_cpu",
+      "id" : 6,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["standard_metadata", "egress_spec"]
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x00ff"
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 466,
+            "column" : 8,
+            "source_fragment" : "standard_metadata.egress_spec = 255"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.clone_to_cpu",
+      "id" : 7,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "clone_ingress_pkt_to_egress",
+          "parameters" : [
+            {
+              "type" : "hexstr",
+              "value" : "0x00000063"
+            },
+            {
+              "type" : "hexstr",
+              "value" : "0x1"
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 474,
+            "column" : 8,
+            "source_fragment" : "clone3(CloneType.I2E, 99, { standard_metadata.ingress_port })"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "act",
+      "id" : 8,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["scalars", "tmp"]
+            },
+            {
+              "type" : "expression",
+              "value" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "b2d",
+                  "left" : null,
+                  "right" : {
+                    "type" : "bool",
+                    "value" : true
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name" : "act_0",
+      "id" : 9,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["scalars", "tmp"]
+            },
+            {
+              "type" : "expression",
+              "value" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "b2d",
+                  "left" : null,
+                  "right" : {
+                    "type" : "bool",
+                    "value" : false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name" : "act_1",
+      "id" : 10,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "mark_to_drop",
+          "parameters" : [
+            {
+              "type" : "header",
+              "value" : "standard_metadata"
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 567,
+            "column" : 12,
+            "source_fragment" : "mark_to_drop(standard_metadata)"
+          }
+        }
+      ]
+    }
+  ],
+  "pipelines" : [
+    {
+      "name" : "ingress",
+      "id" : 0,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 345,
+        "column" : 8,
+        "source_fragment" : "IngressPipeImpl"
+      },
+      "init_table" : "IngressPipeImpl.l2_exact_table",
+      "tables" : [
+        {
+          "name" : "IngressPipeImpl.l2_exact_table",
+          "id" : 0,
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 386,
+            "column" : 10,
+            "source_fragment" : "l2_exact_table"
+          },
+          "key" : [
+            {
+              "match_type" : "exact",
+              "name" : "hdr.ethernet.dst_addr",
+              "target" : ["ethernet", "dst_addr"],
+              "mask" : null
+            }
+          ],
+          "match_type" : "exact",
+          "type" : "simple",
+          "max_size" : 1024,
+          "with_counters" : true,
+          "support_timeout" : false,
+          "direct_meters" : null,
+          "action_ids" : [4, 1],
+          "actions" : ["IngressPipeImpl.set_egress_port", "IngressPipeImpl.drop"],
+          "base_default_next" : null,
+          "next_tables" : {
+            "__HIT__" : "tbl_act",
+            "__MISS__" : "tbl_act_0"
+          },
+          "default_entry" : {
+            "action_id" : 1,
+            "action_const" : true,
+            "action_data" : [],
+            "action_entry_const" : true
+          }
+        },
+        {
+          "name" : "tbl_act",
+          "id" : 1,
+          "key" : [],
+          "match_type" : "exact",
+          "type" : "simple",
+          "max_size" : 1024,
+          "with_counters" : false,
+          "support_timeout" : false,
+          "direct_meters" : null,
+          "action_ids" : [8],
+          "actions" : ["act"],
+          "base_default_next" : "node_5",
+          "next_tables" : {
+            "act" : "node_5"
+          },
+          "default_entry" : {
+            "action_id" : 8,
+            "action_const" : true,
+            "action_data" : [],
+            "action_entry_const" : true
+          }
+        },
+        {
+          "name" : "tbl_act_0",
+          "id" : 2,
+          "key" : [],
+          "match_type" : "exact",
+          "type" : "simple",
+          "max_size" : 1024,
+          "with_counters" : false,
+          "support_timeout" : false,
+          "direct_meters" : null,
+          "action_ids" : [9],
+          "actions" : ["act_0"],
+          "base_default_next" : "node_5",
+          "next_tables" : {
+            "act_0" : "node_5"
+          },
+          "default_entry" : {
+            "action_id" : 9,
+            "action_const" : true,
+            "action_data" : [],
+            "action_entry_const" : true
+          }
+        },
+        {
+          "name" : "IngressPipeImpl.l2_ternary_table",
+          "id" : 3,
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 413,
+            "column" : 10,
+            "source_fragment" : "l2_ternary_table"
+          },
+          "key" : [
+            {
+              "match_type" : "ternary",
+              "name" : "hdr.ethernet.dst_addr",
+              "target" : ["ethernet", "dst_addr"],
+              "mask" : null
+            }
+          ],
+          "match_type" : "ternary",
+          "type" : "simple",
+          "max_size" : 1024,
+          "with_counters" : true,
+          "support_timeout" : false,
+          "direct_meters" : null,
+          "action_ids" : [5, 2],
+          "actions" : ["IngressPipeImpl.set_multicast_group", "IngressPipeImpl.drop"],
+          "base_default_next" : "IngressPipeImpl.acl_table",
+          "next_tables" : {
+            "IngressPipeImpl.set_multicast_group" : "IngressPipeImpl.acl_table",
+            "IngressPipeImpl.drop" : "IngressPipeImpl.acl_table"
+          },
+          "default_entry" : {
+            "action_id" : 2,
+            "action_const" : true,
+            "action_data" : [],
+            "action_entry_const" : true
+          }
+        },
+        {
+          "name" : "IngressPipeImpl.acl_table",
+          "id" : 4,
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 477,
+            "column" : 10,
+            "source_fragment" : "acl_table"
+          },
+          "key" : [
+            {
+              "match_type" : "ternary",
+              "name" : "standard_metadata.ingress_port",
+              "target" : ["standard_metadata", "ingress_port"],
+              "mask" : null
+            },
+            {
+              "match_type" : "ternary",
+              "name" : "hdr.ethernet.dst_addr",
+              "target" : ["ethernet", "dst_addr"],
+              "mask" : null
+            },
+            {
+              "match_type" : "ternary",
+              "name" : "hdr.ethernet.src_addr",
+              "target" : ["ethernet", "src_addr"],
+              "mask" : null
+            },
+            {
+              "match_type" : "ternary",
+              "name" : "hdr.ethernet.ether_type",
+              "target" : ["ethernet", "ether_type"],
+              "mask" : null
+            },
+            {
+              "match_type" : "ternary",
+              "name" : "local_metadata.ip_proto",
+              "target" : ["scalars", "local_metadata_t.ip_proto"],
+              "mask" : null
+            },
+            {
+              "match_type" : "ternary",
+              "name" : "local_metadata.icmp_type",
+              "target" : ["scalars", "local_metadata_t.icmp_type"],
+              "mask" : null
+            },
+            {
+              "match_type" : "ternary",
+              "name" : "local_metadata.l4_src_port",
+              "target" : ["scalars", "local_metadata_t.l4_src_port"],
+              "mask" : null
+            },
+            {
+              "match_type" : "ternary",
+              "name" : "local_metadata.l4_dst_port",
+              "target" : ["scalars", "local_metadata_t.l4_dst_port"],
+              "mask" : null
+            }
+          ],
+          "match_type" : "ternary",
+          "type" : "simple",
+          "max_size" : 1024,
+          "with_counters" : true,
+          "support_timeout" : false,
+          "direct_meters" : null,
+          "action_ids" : [6, 7, 3, 0],
+          "actions" : ["IngressPipeImpl.send_to_cpu", "IngressPipeImpl.clone_to_cpu", "IngressPipeImpl.drop", "NoAction"],
+          "base_default_next" : null,
+          "next_tables" : {
+            "IngressPipeImpl.send_to_cpu" : null,
+            "IngressPipeImpl.clone_to_cpu" : null,
+            "IngressPipeImpl.drop" : null,
+            "NoAction" : null
+          },
+          "default_entry" : {
+            "action_id" : 0,
+            "action_const" : false,
+            "action_data" : [],
+            "action_entry_const" : false
+          }
+        }
+      ],
+      "action_profiles" : [],
+      "conditionals" : [
+        {
+          "name" : "node_5",
+          "id" : 0,
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 533,
+            "column" : 16,
+            "source_fragment" : "!l2_exact_table.apply().hit"
+          },
+          "expression" : {
+            "type" : "expression",
+            "value" : {
+              "op" : "not",
+              "left" : null,
+              "right" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "d2b",
+                  "left" : null,
+                  "right" : {
+                    "type" : "field",
+                    "value" : ["scalars", "tmp"]
+                  }
+                }
+              }
+            }
+          },
+          "true_next" : "IngressPipeImpl.l2_ternary_table",
+          "false_next" : "IngressPipeImpl.acl_table"
+        }
+      ]
+    },
+    {
+      "name" : "egress",
+      "id" : 1,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 546,
+        "column" : 8,
+        "source_fragment" : "EgressPipeImpl"
+      },
+      "init_table" : "node_10",
+      "tables" : [
+        {
+          "name" : "tbl_act_1",
+          "id" : 5,
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 567,
+            "column" : 12,
+            "source_fragment" : "mark_to_drop(standard_metadata)"
+          },
+          "key" : [],
+          "match_type" : "exact",
+          "type" : "simple",
+          "max_size" : 1024,
+          "with_counters" : false,
+          "support_timeout" : false,
+          "direct_meters" : null,
+          "action_ids" : [10],
+          "actions" : ["act_1"],
+          "base_default_next" : null,
+          "next_tables" : {
+            "act_1" : null
+          },
+          "default_entry" : {
+            "action_id" : 10,
+            "action_const" : true,
+            "action_data" : [],
+            "action_entry_const" : true
+          }
+        }
+      ],
+      "action_profiles" : [],
+      "conditionals" : [
+        {
+          "name" : "node_10",
+          "id" : 1,
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 565,
+            "column" : 12,
+            "source_fragment" : "local_metadata.is_multicast == true && ..."
+          },
+          "expression" : {
+            "type" : "expression",
+            "value" : {
+              "op" : "and",
+              "left" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "==",
+                  "left" : {
+                    "type" : "expression",
+                    "value" : {
+                      "op" : "d2b",
+                      "left" : null,
+                      "right" : {
+                        "type" : "field",
+                        "value" : ["scalars", "local_metadata_t.is_multicast"]
+                      }
+                    }
+                  },
+                  "right" : {
+                    "type" : "bool",
+                    "value" : true
+                  }
+                }
+              },
+              "right" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "==",
+                  "left" : {
+                    "type" : "field",
+                    "value" : ["standard_metadata", "ingress_port"]
+                  },
+                  "right" : {
+                    "type" : "field",
+                    "value" : ["standard_metadata", "egress_port"]
+                  }
+                }
+              }
+            }
+          },
+          "false_next" : null,
+          "true_next" : "tbl_act_1"
+        }
+      ]
+    }
+  ],
+  "checksums" : [
+    {
+      "name" : "cksum",
+      "id" : 0,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 580,
+        "column" : 8,
+        "source_fragment" : "update_checksum(hdr.ndp.isValid(), ..."
+      },
+      "target" : ["icmpv6", "checksum"],
+      "type" : "generic",
+      "calculation" : "calc",
+      "verify" : false,
+      "update" : true,
+      "if_cond" : {
+        "type" : "expression",
+        "value" : {
+          "op" : "d2b",
+          "left" : null,
+          "right" : {
+            "type" : "field",
+            "value" : ["ndp", "$valid$"]
+          }
+        }
+      }
+    }
+  ],
+  "force_arith" : [],
+  "extern_instances" : [],
+  "field_aliases" : [
+    [
+      "queueing_metadata.enq_timestamp",
+      ["standard_metadata", "enq_timestamp"]
+    ],
+    [
+      "queueing_metadata.enq_qdepth",
+      ["standard_metadata", "enq_qdepth"]
+    ],
+    [
+      "queueing_metadata.deq_timedelta",
+      ["standard_metadata", "deq_timedelta"]
+    ],
+    [
+      "queueing_metadata.deq_qdepth",
+      ["standard_metadata", "deq_qdepth"]
+    ],
+    [
+      "intrinsic_metadata.ingress_global_timestamp",
+      ["standard_metadata", "ingress_global_timestamp"]
+    ],
+    [
+      "intrinsic_metadata.egress_global_timestamp",
+      ["standard_metadata", "egress_global_timestamp"]
+    ],
+    [
+      "intrinsic_metadata.lf_field_list",
+      ["standard_metadata", "lf_field_list"]
+    ],
+    [
+      "intrinsic_metadata.mcast_grp",
+      ["standard_metadata", "mcast_grp"]
+    ],
+    [
+      "intrinsic_metadata.resubmit_flag",
+      ["standard_metadata", "resubmit_flag"]
+    ],
+    [
+      "intrinsic_metadata.egress_rid",
+      ["standard_metadata", "egress_rid"]
+    ],
+    [
+      "intrinsic_metadata.recirculate_flag",
+      ["standard_metadata", "recirculate_flag"]
+    ],
+    [
+      "intrinsic_metadata.priority",
+      ["standard_metadata", "priority"]
+    ]
+  ],
+  "program" : "p4src/main.p4",
+  "__meta__" : {
+    "version" : [2, 18],
+    "compiler" : "https://github.com/p4lang/p4c"
+  }
+}
\ No newline at end of file
diff --git a/src/device/tests/p4/test-p4info.txt b/src/device/tests/p4/test-p4info.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6382852ad8f597786003252184153b526c66fb9e
--- /dev/null
+++ b/src/device/tests/p4/test-p4info.txt
@@ -0,0 +1,245 @@
+pkg_info {
+  arch: "v1model"
+}
+tables {
+  preamble {
+    id: 33605373
+    name: "IngressPipeImpl.l2_exact_table"
+    alias: "l2_exact_table"
+  }
+  match_fields {
+    id: 1
+    name: "hdr.ethernet.dst_addr"
+    bitwidth: 48
+    match_type: EXACT
+  }
+  action_refs {
+    id: 16812802
+  }
+  action_refs {
+    id: 16796182
+    annotations: "@defaultonly"
+    scope: DEFAULT_ONLY
+  }
+  const_default_action_id: 16796182
+  direct_resource_ids: 318813612
+  size: 1024
+}
+tables {
+  preamble {
+    id: 33573501
+    name: "IngressPipeImpl.l2_ternary_table"
+    alias: "l2_ternary_table"
+  }
+  match_fields {
+    id: 1
+    name: "hdr.ethernet.dst_addr"
+    bitwidth: 48
+    match_type: TERNARY
+  }
+  action_refs {
+    id: 16841371
+  }
+  action_refs {
+    id: 16796182
+    annotations: "@defaultonly"
+    scope: DEFAULT_ONLY
+  }
+  const_default_action_id: 16796182
+  direct_resource_ids: 318768597
+  size: 1024
+}
+tables {
+  preamble {
+    id: 33557865
+    name: "IngressPipeImpl.acl_table"
+    alias: "acl_table"
+  }
+  match_fields {
+    id: 1
+    name: "standard_metadata.ingress_port"
+    bitwidth: 9
+    match_type: TERNARY
+  }
+  match_fields {
+    id: 2
+    name: "hdr.ethernet.dst_addr"
+    bitwidth: 48
+    match_type: TERNARY
+  }
+  match_fields {
+    id: 3
+    name: "hdr.ethernet.src_addr"
+    bitwidth: 48
+    match_type: TERNARY
+  }
+  match_fields {
+    id: 4
+    name: "hdr.ethernet.ether_type"
+    bitwidth: 16
+    match_type: TERNARY
+  }
+  match_fields {
+    id: 5
+    name: "local_metadata.ip_proto"
+    bitwidth: 8
+    match_type: TERNARY
+  }
+  match_fields {
+    id: 6
+    name: "local_metadata.icmp_type"
+    bitwidth: 8
+    match_type: TERNARY
+  }
+  match_fields {
+    id: 7
+    name: "local_metadata.l4_src_port"
+    bitwidth: 16
+    match_type: TERNARY
+  }
+  match_fields {
+    id: 8
+    name: "local_metadata.l4_dst_port"
+    bitwidth: 16
+    match_type: TERNARY
+  }
+  action_refs {
+    id: 16833331
+  }
+  action_refs {
+    id: 16782152
+  }
+  action_refs {
+    id: 16796182
+  }
+  action_refs {
+    id: 16800567
+    annotations: "@defaultonly"
+    scope: DEFAULT_ONLY
+  }
+  direct_resource_ids: 318773822
+  size: 1024
+}
+actions {
+  preamble {
+    id: 16800567
+    name: "NoAction"
+    alias: "NoAction"
+  }
+}
+actions {
+  preamble {
+    id: 16796182
+    name: "IngressPipeImpl.drop"
+    alias: "drop"
+  }
+}
+actions {
+  preamble {
+    id: 16812802
+    name: "IngressPipeImpl.set_egress_port"
+    alias: "set_egress_port"
+  }
+  params {
+    id: 1
+    name: "port_num"
+    bitwidth: 9
+  }
+}
+actions {
+  preamble {
+    id: 16841371
+    name: "IngressPipeImpl.set_multicast_group"
+    alias: "set_multicast_group"
+  }
+  params {
+    id: 1
+    name: "gid"
+    bitwidth: 16
+  }
+}
+actions {
+  preamble {
+    id: 16833331
+    name: "IngressPipeImpl.send_to_cpu"
+    alias: "send_to_cpu"
+  }
+}
+actions {
+  preamble {
+    id: 16782152
+    name: "IngressPipeImpl.clone_to_cpu"
+    alias: "clone_to_cpu"
+  }
+}
+direct_counters {
+  preamble {
+    id: 318813612
+    name: "l2_exact_table_counter"
+    alias: "l2_exact_table_counter"
+  }
+  spec {
+    unit: BOTH
+  }
+  direct_table_id: 33605373
+}
+direct_counters {
+  preamble {
+    id: 318768597
+    name: "l2_ternary_table_counter"
+    alias: "l2_ternary_table_counter"
+  }
+  spec {
+    unit: BOTH
+  }
+  direct_table_id: 33573501
+}
+direct_counters {
+  preamble {
+    id: 318773822
+    name: "acl_table_counter"
+    alias: "acl_table_counter"
+  }
+  spec {
+    unit: BOTH
+  }
+  direct_table_id: 33557865
+}
+controller_packet_metadata {
+  preamble {
+    id: 67132047
+    name: "packet_in"
+    alias: "packet_in"
+    annotations: "@controller_header(\"packet_in\")"
+  }
+  metadata {
+    id: 1
+    name: "ingress_port"
+    bitwidth: 9
+  }
+  metadata {
+    id: 2
+    name: "_pad"
+    bitwidth: 7
+  }
+}
+controller_packet_metadata {
+  preamble {
+    id: 67111875
+    name: "packet_out"
+    alias: "packet_out"
+    annotations: "@controller_header(\"packet_out\")"
+  }
+  metadata {
+    id: 1
+    name: "egress_port"
+    bitwidth: 9
+  }
+  metadata {
+    id: 2
+    name: "_pad"
+    bitwidth: 7
+  }
+}
+type_info {
+}
diff --git a/src/device/tests/test_internal_p4.py b/src/device/tests/test_internal_p4.py
new file mode 100644
index 0000000000000000000000000000000000000000..4907e538843dfa5d9c7833b4d02f05e483720510
--- /dev/null
+++ b/src/device/tests/test_internal_p4.py
@@ -0,0 +1,252 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Internal P4 driver tests.
+"""
+
+import pytest
+from device.service.drivers.p4.p4_driver import P4Driver
+from device.service.drivers.p4.p4_common import (
+    matches_mac, encode_mac, decode_mac, encode,
+    matches_ipv4, encode_ipv4, decode_ipv4,
+    matches_ipv6, encode_ipv6, decode_ipv6,
+    encode_num, decode_num
+)
+from .device_p4 import(
+        DEVICE_P4_IP_ADDR, DEVICE_P4_PORT, DEVICE_P4_DPID, DEVICE_P4_NAME,
+        DEVICE_P4_VENDOR, DEVICE_P4_HW_VER, DEVICE_P4_SW_VER,
+        DEVICE_P4_WORKERS, DEVICE_P4_GRACE_PERIOD,
+        DEVICE_P4_CONFIG_TABLE_ENTRY, DEVICE_P4_DECONFIG_TABLE_ENTRY)
+from .mock_p4runtime_service import MockP4RuntimeService
+
+
+@pytest.fixture(scope='session')
+def p4runtime_service():
+    """
+    Spawn a mock P4Runtime server.
+
+    :return: void
+    """
+    _service = MockP4RuntimeService(
+        address=DEVICE_P4_IP_ADDR, port=DEVICE_P4_PORT,
+        max_workers=DEVICE_P4_WORKERS,
+        grace_period=DEVICE_P4_GRACE_PERIOD)
+    _service.start()
+    yield _service
+    _service.stop()
+
+
+@pytest.fixture(scope='session')
+def device_driverapi_p4():
+    """
+    Invoke an instance of the P4 driver.
+
+    :return: void
+    """
+    _driver = P4Driver(
+        address=DEVICE_P4_IP_ADDR,
+        port=DEVICE_P4_PORT,
+        id=DEVICE_P4_DPID,
+        name=DEVICE_P4_NAME,
+        vendor=DEVICE_P4_VENDOR,
+        hw_ver=DEVICE_P4_HW_VER,
+        sw_ver=DEVICE_P4_SW_VER)
+    _driver.Connect()
+    yield _driver
+    _driver.Disconnect()
+
+
+def test_device_driverapi_p4_setconfig(
+        p4runtime_service: MockP4RuntimeService,
+        device_driverapi_p4: P4Driver):
+    """
+    Test the SetConfig RPC of the P4 driver API.
+
+    :param p4runtime_service: Mock P4Runtime service
+    :param device_driverapi_p4: instance of the P4 device driver
+    :return: void
+    """
+    result = device_driverapi_p4.SetConfig(
+        DEVICE_P4_CONFIG_TABLE_ENTRY
+    )
+    assert list(result)
+
+
+def test_device_driverapi_p4_getconfig(
+        p4runtime_service: MockP4RuntimeService,
+        device_driverapi_p4: P4Driver):
+    """
+    Test the GetConfig RPC of the P4 driver API.
+
+    :param p4runtime_service: Mock P4Runtime service
+    :param device_driverapi_p4: instance of the P4 device driver
+    :return: void
+    """
+    pytest.skip('Skipping test: GetConfig')
+
+
+def test_device_driverapi_p4_getresource(
+        p4runtime_service: MockP4RuntimeService,
+        device_driverapi_p4: P4Driver):
+    """
+    Test the GetResource RPC of the P4 driver API.
+
+    :param p4runtime_service: Mock P4Runtime service
+    :param device_driverapi_p4: instance of the P4 device driver
+    :return: void
+    """
+    pytest.skip('Skipping test: GetResource')
+
+
+def test_device_driverapi_p4_deleteconfig(
+        p4runtime_service: MockP4RuntimeService,
+        device_driverapi_p4: P4Driver):
+    """
+    Test the DeleteConfig RPC of the P4 driver API.
+
+    :param p4runtime_service: Mock P4Runtime service
+    :param device_driverapi_p4: instance of the P4 device driver
+    :return: void
+    """
+    result = device_driverapi_p4.DeleteConfig(
+        DEVICE_P4_DECONFIG_TABLE_ENTRY
+    )
+    assert list(result)
+
+
+def test_device_driverapi_p4_subscribe_state(
+        p4runtime_service: MockP4RuntimeService,
+        device_driverapi_p4: P4Driver):
+    """
+    Test the SubscribeState RPC of the P4 driver API.
+
+    :param p4runtime_service: Mock P4Runtime service
+    :param device_driverapi_p4: instance of the P4 device driver
+    :return: void
+    """
+    pytest.skip('Skipping test: SubscribeState')
+
+
+def test_device_driverapi_p4_getstate(
+        p4runtime_service: MockP4RuntimeService,
+        device_driverapi_p4: P4Driver):
+    """
+    Test the GetState RPC of the P4 driver API.
+
+    :param p4runtime_service: Mock P4Runtime service
+    :param device_driverapi_p4: instance of the P4 device driver
+    :return: void
+    """
+    pytest.skip('Skipping test: GetState')
+
+
+def test_device_driverapi_p4_unsubscribe_state(
+        p4runtime_service: MockP4RuntimeService,
+        device_driverapi_p4: P4Driver):
+    """
+    Test the UnsubscribeState RPC of the P4 driver API.
+
+    :param p4runtime_service: Mock P4Runtime service
+    :param device_driverapi_p4: instance of the P4 device driver
+    :return: void
+    """
+    pytest.skip('Skipping test: UnsubscribeState')
+
+
+def test_p4_common_mac():
+    """
+    Test MAC converters.
+
+    :return: void
+    """
+    wrong_mac = "aa:bb:cc:dd:ee"
+    assert not matches_mac(wrong_mac)
+
+    mac = "aa:bb:cc:dd:ee:fe"
+    assert matches_mac(mac)
+    enc_mac = encode_mac(mac)
+    assert enc_mac == b'\xaa\xbb\xcc\xdd\xee\xfe',\
+        "String-based MAC address to bytes failed"
+    enc_mac = encode(mac, 6*8)
+    assert enc_mac == b'\xaa\xbb\xcc\xdd\xee\xfe',\
+        "String-based MAC address to bytes failed"
+    dec_mac = decode_mac(enc_mac)
+    assert mac == dec_mac,\
+        "MAC address bytes to string failed"
+
+
+def test_p4_common_ipv4():
+    """
+    Test IPv4 converters.
+
+    :return: void
+    """
+    assert not matches_ipv4("10.0.0.1.5")
+    assert not matches_ipv4("256.0.0.1")
+    assert not matches_ipv4("256.0.1")
+    assert not matches_ipv4("10001")
+
+    ipv4 = "10.0.0.1"
+    assert matches_ipv4(ipv4)
+    enc_ipv4 = encode_ipv4(ipv4)
+    assert enc_ipv4 == b'\x0a\x00\x00\x01',\
+        "String-based IPv4 address to bytes failed"
+    dec_ipv4 = decode_ipv4(enc_ipv4)
+    assert ipv4 == dec_ipv4,\
+        "IPv4 address bytes to string failed"
+
+
+def test_p4_common_ipv6():
+    """
+    Test IPv6 converters.
+
+    :return: void
+    """
+    assert not matches_ipv6('10.0.0.1')
+    assert matches_ipv6('2001:0000:85a3::8a2e:370:1111')
+
+    ipv6 = "1:2:3:4:5:6:7:8"
+    assert matches_ipv6(ipv6)
+    enc_ipv6 = encode_ipv6(ipv6)
+    assert enc_ipv6 == \
+           b'\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\x07\x00\x08',\
+           "String-based IPv6 address to bytes failed"
+    dec_ipv6 = decode_ipv6(enc_ipv6)
+    assert ipv6 == dec_ipv6,\
+        "IPv6 address bytes to string failed"
+
+
+def test_p4_common_numbers():
+    """
+    Test numerical converters.
+
+    :return: void
+    """
+    num = 1337
+    byte_len = 5
+    enc_num = encode_num(num, byte_len * 8)
+    assert enc_num == b'\x00\x00\x00\x05\x39',\
+        "Number to bytes conversion failed"
+    dec_num = decode_num(enc_num)
+    assert num == dec_num,\
+        "Bytes to number conversion failed"
+    assert encode((num,), byte_len * 8) == enc_num
+    assert encode([num], byte_len * 8) == enc_num
+
+    num = 256
+    try:
+        encode_num(num, 8)
+    except OverflowError:
+        pass
diff --git a/src/device/tests/test_unit_p4.py b/src/device/tests/test_unit_p4.py
deleted file mode 100644
index 777ab280aa2b500c3c2b445fcecdf81024b817f3..0000000000000000000000000000000000000000
--- a/src/device/tests/test_unit_p4.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import pytest
-from device.service.drivers.p4.p4_driver import P4Driver
-from .device_p4 import(
-        DEVICE_P4_ADDRESS, DEVICE_P4_PORT, DEVICE_P4_DPID, DEVICE_P4_NAME,
-        DEVICE_P4_VENDOR, DEVICE_P4_HW_VER, DEVICE_P4_SW_VER,
-        DEVICE_P4_PIPECONF, DEVICE_P4_WORKERS, DEVICE_P4_GRACE_PERIOD)
-from .mock_p4runtime_service import MockP4RuntimeService
-
-
-@pytest.fixture(scope='session')
-def p4runtime_service():
-    _service = MockP4RuntimeService(
-        address=DEVICE_P4_ADDRESS, port=DEVICE_P4_PORT,
-        max_workers=DEVICE_P4_WORKERS,
-        grace_period=DEVICE_P4_GRACE_PERIOD)
-    _service.start()
-    yield _service
-    _service.stop()
-
-
-@pytest.fixture(scope='session')
-def device_driverapi_p4():
-    _driver = P4Driver(
-        address=DEVICE_P4_ADDRESS,
-        port=DEVICE_P4_PORT,
-        id=DEVICE_P4_DPID,
-        name=DEVICE_P4_NAME,
-        vendor=DEVICE_P4_VENDOR,
-        hw_ver=DEVICE_P4_HW_VER,
-        sw_ver=DEVICE_P4_SW_VER,
-        pipeconf=DEVICE_P4_PIPECONF)
-    _driver.Connect()
-    yield _driver
-    _driver.Disconnect()
-
-
-def test_device_driverapi_p4_setconfig(
-        p4runtime_service: MockP4RuntimeService,
-        device_driverapi_p4: P4Driver):  # pylint: disable=redefined-outer-name
-    device_driverapi_p4.SetConfig([])
-    return
-
-
-def test_device_driverapi_p4_getconfig(
-        p4runtime_service: MockP4RuntimeService,
-        device_driverapi_p4: P4Driver):  # pylint: disable=redefined-outer-name
-    device_driverapi_p4.GetConfig()
-    return
-
-
-def test_device_driverapi_p4_getresource(
-        p4runtime_service: MockP4RuntimeService,
-        device_driverapi_p4: P4Driver):  # pylint: disable=redefined-outer-name
-    device_driverapi_p4.GetResource("")
-    return
-
-
-def test_device_driverapi_p4_getstate(
-        p4runtime_service: MockP4RuntimeService,
-        device_driverapi_p4: P4Driver):  # pylint: disable=redefined-outer-name
-    device_driverapi_p4.GetState()
-    return
-
-
-def test_device_driverapi_p4_deleteconfig(
-        p4runtime_service: MockP4RuntimeService,
-        device_driverapi_p4: P4Driver):  # pylint: disable=redefined-outer-name
-    device_driverapi_p4.DeleteConfig([])
-    return
-
-
-def test_device_driverapi_p4_subscribe_state(
-        p4runtime_service: MockP4RuntimeService,
-        device_driverapi_p4: P4Driver):  # pylint: disable=redefined-outer-name
-    device_driverapi_p4.SubscribeState([])
-    return
-
-
-def test_device_driverapi_p4_unsubscribe_state(
-        p4runtime_service: MockP4RuntimeService,
-        device_driverapi_p4: P4Driver):  # pylint: disable=redefined-outer-name
-    device_driverapi_p4.UnsubscribeState([])
-    return
diff --git a/src/device/tests/test_unitary_openconfig.py b/src/device/tests/test_unitary_openconfig.py
index 32fb5709a98d095982d46d16450117a84f89f165..6144a95d96bbbfd68213356f06573a2200c11bb1 100644
--- a/src/device/tests/test_unitary_openconfig.py
+++ b/src/device/tests/test_unitary_openconfig.py
@@ -29,8 +29,12 @@ from .PrepareTestScenario import ( # pylint: disable=unused-import
     mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment)
 
 try:
-    from .Device_OpenConfig_Infinera1 import(
+    #from .Device_OpenConfig_Infinera1 import(
     #from .Device_OpenConfig_Infinera2 import(
+    #from .Device_OpenConfig_Adva import(
+    #from .Device_OpenConfig_Adva_149 import(
+    from .Device_OpenConfig_Adva_155 import(
+    #from .Device_OpenConfig_Cisco import(
         DEVICE_OC, DEVICE_OC_CONFIG_RULES, DEVICE_OC_DECONFIG_RULES, DEVICE_OC_CONNECT_RULES, DEVICE_OC_ID,
         DEVICE_OC_UUID)
     ENABLE_OPENCONFIG = True
@@ -38,10 +42,9 @@ except ImportError:
     ENABLE_OPENCONFIG = False
 
 ENABLE_OPENCONFIG_CONFIGURE   = True
-ENABLE_OPENCONFIG_MONITOR     = True
+ENABLE_OPENCONFIG_MONITOR     = False
 ENABLE_OPENCONFIG_DECONFIGURE = True
 
-
 logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
 logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
 logging.getLogger('monitoring-client').setLevel(logging.WARNING)
diff --git a/src/device/tests/test_unitary_p4.py b/src/device/tests/test_unitary_p4.py
index 86a669bd40deb8f7839d3e682b8a1f52f3c38e1b..43313caff33d646918b9be23c87e499185714a2c 100644
--- a/src/device/tests/test_unitary_p4.py
+++ b/src/device/tests/test_unitary_p4.py
@@ -12,22 +12,34 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy, grpc, logging, pytest
-from common.proto.context_pb2 import Device, DeviceId
+"""
+P4 unit tests.
+"""
+
+import copy
+import logging
+import operator
+import grpc
+import pytest
+from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceId,\
+    DeviceOperationalStatusEnum
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from device.service.DeviceService import DeviceService
 from device.service.driver_api._Driver import _Driver
-from .PrepareTestScenario import ( # pylint: disable=unused-import
+from .PrepareTestScenario import (  # pylint: disable=unused-import
     # be careful, order of symbols is important here!
-    mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment)
+    mock_service, device_service, context_client, device_client,
+    monitoring_client, test_prepare_environment)
 
 from .mock_p4runtime_service import MockP4RuntimeService
 try:
     from .device_p4 import(
-        DEVICE_P4, DEVICE_P4_ID, DEVICE_P4_UUID, DEVICE_P4_ADDRESS, DEVICE_P4_PORT, DEVICE_P4_WORKERS,
-        DEVICE_P4_GRACE_PERIOD, DEVICE_P4_CONNECT_RULES, DEVICE_P4_CONFIG_RULES)
+        DEVICE_P4, DEVICE_P4_ID, DEVICE_P4_UUID,
+        DEVICE_P4_IP_ADDR, DEVICE_P4_PORT, DEVICE_P4_WORKERS,
+        DEVICE_P4_GRACE_PERIOD, DEVICE_P4_CONNECT_RULES,
+        DEVICE_P4_CONFIG_TABLE_ENTRY, DEVICE_P4_DECONFIG_TABLE_ENTRY)
     ENABLE_P4 = True
 except ImportError:
     ENABLE_P4 = False
@@ -35,10 +47,17 @@ except ImportError:
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
+
 @pytest.fixture(scope='session')
 def p4runtime_service():
+    """
+    Spawn a mock P4Runtime server.
+
+    :return: void
+    """
     _service = MockP4RuntimeService(
-        address=DEVICE_P4_ADDRESS, port=DEVICE_P4_PORT,
+        address=DEVICE_P4_IP_ADDR,
+        port=DEVICE_P4_PORT,
         max_workers=DEVICE_P4_WORKERS,
         grace_period=DEVICE_P4_GRACE_PERIOD)
     _service.start()
@@ -47,27 +66,35 @@ def p4runtime_service():
 
 
 # ----- Test Device Driver P4 --------------------------------------------------
-
 def test_device_p4_add_error_cases(
         context_client: ContextClient,   # pylint: disable=redefined-outer-name
         device_client: DeviceClient,     # pylint: disable=redefined-outer-name
         device_service: DeviceService):  # pylint: disable=redefined-outer-name
+    """
+    Test AddDevice RPC with wrong inputs.
 
-    if not ENABLE_P4: pytest.skip(
-        'Skipping test: No P4 device has been configured')
+    :param context_client: context component client
+    :param device_client: device component client
+    :param device_service: device component service
+    :return:
+    """
 
-    with pytest.raises(grpc.RpcError) as e:
+    if not ENABLE_P4:
+        pytest.skip('Skipping test: No P4 device has been configured')
+
+    with pytest.raises(grpc.RpcError) as ex:
         device_p4_with_extra_rules = copy.deepcopy(DEVICE_P4)
         device_p4_with_extra_rules['device_config']['config_rules'].extend(
             DEVICE_P4_CONNECT_RULES)
         device_p4_with_extra_rules['device_config']['config_rules'].extend(
-            DEVICE_P4_CONFIG_RULES)
+            DEVICE_P4_CONFIG_TABLE_ENTRY)
         device_client.AddDevice(Device(**device_p4_with_extra_rules))
-    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    assert ex.value.code() == grpc.StatusCode.INVALID_ARGUMENT
     msg_head = 'device.device_config.config_rules(['
-    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\
-               'with "_connect/" tag. Others should be configured after adding the device.'
-    except_msg = str(e.value.details())
+    msg_tail = ']) is invalid; RPC method AddDevice only accepts connection '\
+               'Config Rules that should start with "_connect/" tag. '\
+               'Others should be configured after adding the device.'
+    except_msg = str(ex.value.details())
     assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail)
 
 
@@ -76,35 +103,67 @@ def test_device_p4_add_correct(
         device_client: DeviceClient,                # pylint: disable=redefined-outer-name
         device_service: DeviceService,              # pylint: disable=redefined-outer-name
         p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
+    """
+    Test AddDevice RPC with correct inputs.
+
+    :param context_client: context component client
+    :param device_client: device component client
+    :param device_service: device component service
+    :param p4runtime_service: Mock P4Runtime service
+    :return:
+    """
 
-    if not ENABLE_P4: pytest.skip(
-        'Skipping test: No P4 device has been configured')
+    if not ENABLE_P4:
+        pytest.skip('Skipping test: No P4 device has been configured')
 
     device_p4_with_connect_rules = copy.deepcopy(DEVICE_P4)
     device_p4_with_connect_rules['device_config']['config_rules'].extend(
         DEVICE_P4_CONNECT_RULES)
     device_client.AddDevice(Device(**device_p4_with_connect_rules))
     driver_instance_cache = device_service.device_servicer.driver_instance_cache
-    driver : _Driver = driver_instance_cache.get(DEVICE_P4_UUID)
+    driver: _Driver = driver_instance_cache.get(DEVICE_P4_UUID)
     assert driver is not None
 
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID))
+    config_rules = [
+        (
+            ConfigActionEnum.Name(config_rule.action),
+            config_rule.custom.resource_key,
+            config_rule.custom.resource_value
+        )
+        for config_rule in device_data.device_config.config_rules
+        if config_rule.WhichOneof('config_rule') == 'custom'
+    ]
+    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
+        '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule)
+                   for config_rule in config_rules])))
+
 
 def test_device_p4_get(
         context_client: ContextClient,              # pylint: disable=redefined-outer-name
         device_client: DeviceClient,                # pylint: disable=redefined-outer-name
         device_service: DeviceService,              # pylint: disable=redefined-outer-name
         p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
+    """
+    Test GetDevice RPC.
+
+    :param context_client: context component client
+    :param device_client: device component client
+    :param device_service: device component service
+    :param p4runtime_service: Mock P4Runtime service
+    :return:
+    """
 
-    if not ENABLE_P4: pytest.skip(
-        'Skipping test: No P4 device has been configured')
+    if not ENABLE_P4:
+        pytest.skip('Skipping test: No P4 device has been configured')
 
     initial_config = device_client.GetInitialConfig(DeviceId(**DEVICE_P4_ID))
-    LOGGER.info('initial_config = {:s}'.format(
-        grpc_message_to_json_string(initial_config)))
+    assert len(initial_config.config_rules) == 0
+    LOGGER.info('initial_config = %s',
+                grpc_message_to_json_string(initial_config))
 
     device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID))
-    LOGGER.info('device_data = {:s}'.format(
-        grpc_message_to_json_string(device_data)))
+    LOGGER.info('device_data = %s', grpc_message_to_json_string(device_data))
 
 
 def test_device_p4_configure(
@@ -112,11 +171,58 @@ def test_device_p4_configure(
         device_client: DeviceClient,                # pylint: disable=redefined-outer-name
         device_service: DeviceService,              # pylint: disable=redefined-outer-name
         p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
+    """
+    Test ConfigureDevice RPC.
 
-    if not ENABLE_P4: pytest.skip(
-        'Skipping test: No P4 device has been configured')
+    :param context_client: context component client
+    :param device_client: device component client
+    :param device_service: device component service
+    :param p4runtime_service: Mock P4Runtime service
+    :return:
+    """
 
-    pytest.skip('Skipping test for unimplemented method')
+    if not ENABLE_P4:
+        pytest.skip('Skipping test: No P4 device has been configured')
+
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver: _Driver = driver_instance_cache.get(DEVICE_P4_UUID)
+    assert driver is not None
+
+    # No entries should exist at this point in time
+    driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0))
+    assert len(driver_config) == len(driver.get_manager().get_resource_keys())
+    assert driver.get_manager().count_active_entries() == 0
+
+    # Flip the operational status and check it is correctly flipped in Context
+    device_p4_with_operational_status = copy.deepcopy(DEVICE_P4)
+    device_p4_with_operational_status['device_operational_status'] = \
+        DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+    device_client.ConfigureDevice(Device(**device_p4_with_operational_status))
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID))
+    assert device_data.device_operational_status == \
+           DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+
+    # Insert a new table entry
+    device_p4_with_config_rules = copy.deepcopy(DEVICE_P4)
+    device_p4_with_config_rules['device_config']['config_rules'].extend(
+        DEVICE_P4_CONFIG_TABLE_ENTRY)
+    device_client.ConfigureDevice(Device(**device_p4_with_config_rules))
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID))
+    config_rules = [
+        (ConfigActionEnum.Name(config_rule.action),
+         config_rule.custom.resource_key,
+         config_rule.custom.resource_value)
+        for config_rule in device_data.device_config.config_rules
+        if config_rule.WhichOneof('config_rule') == 'custom'
+    ]
+    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
+        '\n'.join(
+            ['{:s} {:s} = {:s}'.format(*config_rule)
+             for config_rule in config_rules]))
+    )
+    for config_rule in DEVICE_P4_CONFIG_TABLE_ENTRY:
+        assert 'custom' in config_rule
 
 
 def test_device_p4_deconfigure(
@@ -124,11 +230,53 @@ def test_device_p4_deconfigure(
         device_client: DeviceClient,                # pylint: disable=redefined-outer-name
         device_service: DeviceService,              # pylint: disable=redefined-outer-name
         p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
+    """
+    Test DeconfigureDevice RPC.
 
-    if not ENABLE_P4: pytest.skip(
-        'Skipping test: No P4 device has been configured')
+    :param context_client: context component client
+    :param device_client: device component client
+    :param device_service: device component service
+    :param p4runtime_service: Mock P4Runtime service
+    :return:
+    """
 
-    pytest.skip('Skipping test for unimplemented method')
+    if not ENABLE_P4:
+        pytest.skip('Skipping test: No P4 device has been configured')
+
+    driver_instance_cache = device_service.device_servicer.driver_instance_cache
+    driver: _Driver = driver_instance_cache.get(DEVICE_P4_UUID)
+    assert driver is not None
+
+    # Delete a table entry
+    device_p4_with_config_rules = copy.deepcopy(DEVICE_P4)
+    device_p4_with_config_rules['device_config']['config_rules'].extend(
+        DEVICE_P4_DECONFIG_TABLE_ENTRY)
+    device_client.ConfigureDevice(Device(**device_p4_with_config_rules))
+
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID))
+    config_rules = [
+        (ConfigActionEnum.Name(config_rule.action),
+         config_rule.custom.resource_key,
+         config_rule.custom.resource_value)
+        for config_rule in device_data.device_config.config_rules
+        if config_rule.WhichOneof('config_rule') == 'custom'
+    ]
+    LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format(
+        '\n'.join(
+            ['{:s} {:s} = {:s}'.format(*config_rule)
+             for config_rule in config_rules]))
+    )
+    for config_rule in DEVICE_P4_CONFIG_TABLE_ENTRY:
+        assert 'custom' in config_rule
+
+    # Flip the operational status and check it is correctly flipped in Context
+    device_p4_with_operational_status = copy.deepcopy(DEVICE_P4)
+    device_p4_with_operational_status['device_operational_status'] = \
+        DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
+    device_client.ConfigureDevice(Device(**device_p4_with_operational_status))
+    device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID))
+    assert device_data.device_operational_status == \
+           DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
 
 
 def test_device_p4_delete(
@@ -136,10 +284,20 @@ def test_device_p4_delete(
         device_client: DeviceClient,                # pylint: disable=redefined-outer-name
         device_service: DeviceService,              # pylint: disable=redefined-outer-name
         p4runtime_service: MockP4RuntimeService):   # pylint: disable=redefined-outer-name
+    """
+    Test DeleteDevice RPC.
+
+    :param context_client: context component client
+    :param device_client: device component client
+    :param device_service: device component service
+    :param p4runtime_service: Mock P4Runtime service
+    :return:
+    """
 
-    if not ENABLE_P4: pytest.skip('Skipping test: No P4 device has been configured')
+    if not ENABLE_P4:
+        pytest.skip('Skipping test: No P4 device has been configured')
 
     device_client.DeleteDevice(DeviceId(**DEVICE_P4_ID))
     driver_instance_cache = device_service.device_servicer.driver_instance_cache
-    driver : _Driver = driver_instance_cache.get(DEVICE_P4_UUID)
+    driver: _Driver = driver_instance_cache.get(DEVICE_P4_UUID)
     assert driver is None
diff --git a/src/monitoring/requirements.in b/src/monitoring/requirements.in
index e0176e0266ad6239dabb3aeedc273ddc0b638ded..50f283a1940ed99d16276857d2cab22220921879 100644
--- a/src/monitoring/requirements.in
+++ b/src/monitoring/requirements.in
@@ -5,11 +5,11 @@ fastcache==1.1.0
 #opencensus[stackdriver]
 #google-cloud-profiler
 #numpy
-Jinja2==3.0.3
-ncclient==0.6.13
-p4runtime==1.3.0
-paramiko==2.9.2
-# influx-line-protocol==0.1.4
+#Jinja2==3.0.3
+#ncclient==0.6.13
+#p4runtime==1.3.0
+#paramiko==2.9.2
+influx-line-protocol==0.1.4
 python-dateutil==2.8.2
 python-json-logger==2.0.2
 pytz==2021.3
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index 3faff440aa5f1f7ac1bcefca612be471d30f2586..529fd4335ca2393891e6d2a56f3380161d759544 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -38,8 +38,11 @@ from device.client.DeviceClient import DeviceClient
 from device.service.DeviceService import DeviceService
 from device.service.driver_api.DriverFactory import DriverFactory
 from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
-from device.service.drivers import DRIVERS
 
+os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
+from device.service.drivers import DRIVERS  # pylint: disable=wrong-import-position
+
+# pylint: disable=wrong-import-position
 from monitoring.client.MonitoringClient import MonitoringClient
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from monitoring.service import ManagementDBTools, MetricsDBTools
diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
index 70b50dae53ba22eb6c8df018fb5663cce0bc125e..76b49bc8bd4a5ded840ccad13f0941d05070d344 100644
--- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
@@ -194,9 +194,13 @@ class KDisjointPathAlgorithm(_Algorithm):
             grpc_services[service_key] = self.add_service_to_reply(reply, context_uuid, service_uuid)
 
             for num_path,service_path_ero in enumerate(paths):
+                self.logger.warning('num_path={:d}'.format(num_path))
+                self.logger.warning('service_path_ero={:s}'.format(str(service_path_ero)))
                 if service_path_ero is None: continue
                 path_hops = eropath_to_hops(service_path_ero, self.endpoint_to_link_dict)
+                self.logger.warning('path_hops={:s}'.format(str(path_hops)))
                 connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid)
+                self.logger.warning('connections={:s}'.format(str(connections)))
 
                 for connection in connections:
                     connection_uuid,device_layer,path_hops,_ = connection
@@ -221,8 +225,8 @@ class KDisjointPathAlgorithm(_Algorithm):
                     grpc_connection = self.add_connection_to_reply(reply, connection_uuid, grpc_service, path_hops)
                     grpc_connections[connection_uuid] = grpc_connection
 
-                    for service_uuid in dependencies:
-                        sub_service_key = (context_uuid, service_uuid)
+                    for sub_service_uuid in dependencies:
+                        sub_service_key = (context_uuid, sub_service_uuid)
                         grpc_sub_service = grpc_services.get(sub_service_key)
                         if grpc_sub_service is None:
                             raise Exception('Service({:s}) not found'.format(str(sub_service_key)))
diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
index bb96ff354ef32cb0a269d2b678fdb9552d86939d..b798813a83d984d6d1d75450529e9c826e220624 100644
--- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
@@ -12,11 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging, requests, uuid
+import json, logging, requests
 from typing import Dict, List, Optional, Tuple
-from common.proto.context_pb2 import Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum
+from common.proto.context_pb2 import (
+    ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum,
+    ServiceTypeEnum)
 from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
-from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.ConfigRule import json_config_rule_set
 from pathcomp.frontend.Config import BACKEND_URL
 from pathcomp.frontend.service.algorithms.tools.ConstantsMappings import DEVICE_LAYER_TO_SERVICE_TYPE, DeviceLayerEnum
 from .tools.EroPathToHops import eropath_to_hops
@@ -156,6 +158,17 @@ class _Algorithm:
                     raise Exception(MSG.format(str(device_layer)))
                 service.service_type = service_type
 
+                if service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
+                    json_tapi_settings = {
+                        'capacity_value'  : 50.0,
+                        'capacity_unit'   : 'GHz',
+                        'layer_proto_name': 'PHOTONIC_MEDIA',
+                        'layer_proto_qual': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC',
+                        'direction'       : 'UNIDIRECTIONAL',
+                    }
+                    config_rule = ConfigRule(**json_config_rule_set('/settings', json_tapi_settings))
+                    service.service_config.config_rules.append(config_rule)
+
             service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
 
             if path_hops is not None and len(path_hops) > 0:
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
index 7745043318a46649b26d24ae99abaed0577d6171..c1977cedb9b341fbb767a5fb8c829cd5f633884c 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
@@ -72,6 +72,10 @@ def compose_constraint(constraint : Constraint) -> Dict:
         LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint))
         return None
     constraint_type = constraint.custom.constraint_type
+    if constraint_type in {'diversity'}:
+        str_constraint = grpc_message_to_json_string(constraint)
+        LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint))
+        return None
     constraint_value = constraint.custom.constraint_value
     return {'constraint_type': constraint_type, 'constraint_value': constraint_value}
 
@@ -129,6 +133,12 @@ def compose_service(grpc_service : Service) -> Dict:
         for service_constraint in grpc_service.service_constraints
     ]))
 
+    constraint_types = {constraint['constraint_type'] for constraint in constraints}
+    if 'bandwidth[gbps]' not in constraint_types:
+        constraints.append({'constraint_type': 'bandwidth[gbps]', 'constraint_value': '20.0'})
+    if 'latency[ms]' not in constraint_types:
+        constraints.append({'constraint_type': 'latency[ms]', 'constraint_value': '20.0'})
+
     return {
         'serviceId': service_id,
         'serviceType': service_type,
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
index 5e4f5408398cca012dca52fb19bf11a2b84a5721..2ff97b96c2a33e77745239b5f944cecb19639b1d 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py
@@ -92,7 +92,7 @@ DEVICE_TYPE_TO_LAYER = {
     DeviceTypeEnum.OPEN_LINE_SYSTEM.value         : DeviceLayerEnum.OPTICAL_CONTROLLER,
 
     DeviceTypeEnum.OPTICAL_ROADM.value            : DeviceLayerEnum.OPTICAL_DEVICE,
-    DeviceTypeEnum.OPTICAL_TRANDPONDER.value      : DeviceLayerEnum.OPTICAL_DEVICE,
+    DeviceTypeEnum.OPTICAL_TRANSPONDER.value      : DeviceLayerEnum.OPTICAL_DEVICE,
 }
 
 DEVICE_LAYER_TO_SERVICE_TYPE = {
diff --git a/src/policy/README.md b/src/policy/README.md
index 4268343577871cf98b9f701a32cd8a1ff4d9a72a..a5ce13c6e68197be7909472c7ff03862909ec059 100644
--- a/src/policy/README.md
+++ b/src/policy/README.md
@@ -1,19 +1,77 @@
-# Policy Management TeraFlow OS service
+# TeraFlowSDN Policy Management service
 
-The Policy Management service is tested on Ubuntu 20.04. Follow the instructions below to build, test, and run this service on your local environment.
+This repository hosts the TeraFlowSDN Policy Management service.
+Follow the instructions below to build, test, and run this service on your local environment.
 
-## Compile code
+## TeraFlowSDN Policy Management service architecture
 
-`
+The TeraFlowSDN Policy Management service architecture consists of ten (10) interfaces listed below:
+
+Interfaces |
+|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|  1. The `PolicyGateway` interface that implements all the RPC functions that are described in `policy.proto` file. |
+|  2. The `MonitoringGateway` interface that communicates with a `Monitoring` service gRPC client to invoke key RPC functions described in `monitoring.proto` file. |
+|  3. The `ContextGateway` interface that communicates with a `Context` service gRPC client to invoke key RPC functions described in `context.proto` file. |
+|  4. The `ServiceGateway` interface that communicates with a `Service` service gRPC client to invoke key RPC functions described in `service.proto` file. |
+|  5. The `DeviceGateway` interface that communicates with a `Device` service gRPC client to invoke key RPC functions described in `device.proto` file. |
+|  6. The `PolicyService` interface that implements the Policy RPC methods by communicating with a `Monitoring` gRPC client, a `Context` gRPC client, a `Service` gRPC client, and a `Device` gRPC client through the `MonitoringService`, `ContextService`, `ServiceService`, and `DeviceService` interfaces respectively. |
+|  7. The `MonitoringService` interface that implements the `SetKpiAlarm()` and `GetAlarmResponseStream()` methods by communicating with a `Monitoring` gRPC client through the use of the `MonitoringGateway` interface. |
+|  8. The `ContextService` interface that implements the `GetService()`, `GetDevice()`, `GetPolicyRule`, `SetPolicyRule`, and `DeletePolicyRule` methods by communicating with a `Context` gRPC client through the use of the `ContextGateway` interface. |
+|  9. The `ServiceService` interface that implements the `UpdateService()` method by communicating with a `Service` gRPC client through the use of the `ServiceGateway` interface. |
+| 10. The `DeviceService` interface that implements the `ConfigureDevice()` method by communicating with a `Device` gRPC client through the use of the `DeviceGateway` interface. |
+
+## Prerequisites
+
+The TeraFlowSDN Policy Management service is currently tested against Ubuntu 20.04 and Java 11.
+
+To quickly install Java 11 on a Debian-based Linux distro do:
+
+```bash
+sudo apt-get install openjdk-11-jdk -y
+```
+
+Feel free to try more recent Java versions.
+
+## Compile
+
+```bash
 ./mvnw compile
-`
-## Execute unit tests
+```
 
-`
+## Run tests
+
+```bash
 ./mvnw test
-`
+```
+
 ## Run service
 
-`
+```bash
 ./mvnw quarkus:dev
-`
+````
+
+## Clean
+
+```bash
+./mvnw clean
+```
+
+## Deploying on a Kubernetes cluster
+
+To create the K8s manifest file under `target/kubernetes/kubernetes.yml` to be used run
+
+```bash
+./mvnw clean package -DskipUTs -DskipITs
+```
+
+To deploy the application in a K8s cluster run
+
+```bash
+kubectl apply -f "manifests/policyservice.yaml"
+```
+
+## Maintainers
+
+This TeraFlowSDN service is implemented by [UBITECH](https://www.ubitech.eu).
+
+Feel free to contact Georgios Katsikas (gkatsikas at ubitech dot eu) in case you have questions.
diff --git a/src/service/Dockerfile b/src/service/Dockerfile
index c53a897821b759a8005118ba81a3a0f5c0b73c66..e469898e590b8797e8d3305e1c583caed41bfc80 100644
--- a/src/service/Dockerfile
+++ b/src/service/Dockerfile
@@ -64,6 +64,7 @@ RUN python3 -m pip install -r requirements.txt
 WORKDIR /var/teraflow
 COPY src/context/. context/
 COPY src/device/. device/
+COPY src/pathcomp/frontend/. pathcomp/frontend/
 COPY src/service/. service/
 
 # Start the service
diff --git a/src/service/service/ServiceService.py b/src/service/service/ServiceService.py
index b152376254b52f39c7351eca628165a4a05fac31..2f44fe01894230f84749115ce781178b7d955a36 100644
--- a/src/service/service/ServiceService.py
+++ b/src/service/service/ServiceService.py
@@ -14,9 +14,6 @@
 
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
-from common.orm.backend.BackendEnum import BackendEnum
-from common.orm.Database import Database
-from common.orm.Factory import get_database_backend
 from common.proto.service_pb2_grpc import add_ServiceServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from .ServiceServiceServicerImpl import ServiceServiceServicerImpl
@@ -26,8 +23,7 @@ class ServiceService(GenericGrpcService):
     def __init__(self, service_handler_factory : ServiceHandlerFactory, cls_name: str = __name__) -> None:
         port = get_service_port_grpc(ServiceNameEnum.SERVICE)
         super().__init__(port, cls_name=cls_name)
-        database = Database(get_database_backend(backend=BackendEnum.INMEMORY))
-        self.service_servicer = ServiceServiceServicerImpl(database, service_handler_factory)
+        self.service_servicer = ServiceServiceServicerImpl(service_handler_factory)
 
     def install_servicers(self):
         add_ServiceServiceServicer_to_server(self.service_servicer, self.server)
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index 6355cafbef0fc65338269df064a0f56e115b746e..bc71168f621afc9f0a9ed93d51844542beed813c 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -12,25 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, List
 import grpc, json, logging
-from common.orm.Database import Database
-from common.orm.HighLevel import get_object
-from common.orm.backend.Tools import key_to_str
-from common.proto.context_pb2 import Empty, Service, ServiceId
+from typing import Optional
+from common.proto.context_pb2 import Empty, Service, ServiceId, ServiceStatusEnum
+from common.proto.pathcomp_pb2 import PathCompRequest
 from common.proto.service_pb2_grpc import ServiceServiceServicer
 from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
-from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException
+from common.rpc_method_wrapper.ServiceExceptions import AlreadyExistsException, InvalidArgumentException
 from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-from service.service.database.DeviceModel import DeviceModel
-from .database.DatabaseServiceTools import (
-    sync_service_from_context, sync_service_to_context, update_service_in_local_database)
-from .database.ServiceModel import ServiceModel
-from .path_computation_element.PathComputationElement import PathComputationElement, dump_connectivity
+from pathcomp.frontend.client.PathCompClient import PathCompClient
+from service.service.tools.ContextGetters import get_service
 from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
-from .Tools import delete_service, sync_devices_from_context, update_service
+from .task_scheduler.TaskScheduler import TasksScheduler
 
 LOGGER = logging.getLogger(__name__)
 
@@ -39,11 +33,8 @@ METHOD_NAMES = ['CreateService', 'UpdateService', 'DeleteService']
 METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
 
 class ServiceServiceServicerImpl(ServiceServiceServicer):
-    def __init__(self, database : Database, service_handler_factory : ServiceHandlerFactory) -> None:
+    def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None:
         LOGGER.debug('Creating Servicer...')
-        self.context_client = ContextClient()
-        self.device_client = DeviceClient()
-        self.database = database
         self.service_handler_factory = service_handler_factory
         LOGGER.debug('Servicer Created')
 
@@ -84,96 +75,81 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
                 extra_details='RPC method CreateService does not accept Config Rules. '\
                               'Config Rules should be configured after creating the service.')
 
-        sync_service_from_context(service_context_uuid, service_uuid, self.context_client, self.database)
-        db_service,_ = update_service_in_local_database(self.database, request)
+        # check that service does not exist
+        context_client = ContextClient()
+        current_service = get_service(context_client, request.service_id)
+        if current_service is not None:
+            context_uuid = request.service_id.context_id.context_uuid.uuid
+            service_uuid = request.service_id.service_uuid.uuid
+            raise AlreadyExistsException(
+                'Service', service_uuid, extra_details='context_uuid={:s}'.format(str(context_uuid)))
 
-        LOGGER.info('[CreateService] db_service = {:s}'.format(str(db_service.dump(
-            include_endpoint_ids=True, include_constraints=True, include_config_rules=True))))
-
-        sync_service_to_context(db_service, self.context_client)
-        return ServiceId(**db_service.dump_id())
+        # just create the service in the Context database to lock the service_id
+        # update will perform changes on the resources
+        service_id = context_client.SetService(request)
+        return service_id
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def UpdateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId:
         LOGGER.info('[UpdateService] begin ; request = {:s}'.format(grpc_message_to_json_string(request)))
 
-        service_id = request.service_id
-        service_uuid = service_id.service_uuid.uuid
-        service_context_uuid = service_id.context_id.context_uuid.uuid
-
-        pce = PathComputationElement()
-        pce.load_topology(self.context_client)
-        pce.load_connectivity(self.context_client, service_id)
-        #pce.dump_topology_to_file('../data/topo.dot')
-        #pce.dump_connectivity_to_file('../data/conn-before.txt')
-        connectivity = pce.route_service(request)
-        #pce.dump_connectivity_to_file('../data/conn-after.txt')
-
-        LOGGER.info('[UpdateService] connectivity = {:s}'.format(str(dump_connectivity(connectivity))))
-
-        if connectivity is None:
-            # just update local database and context
-            str_service_key = key_to_str([service_context_uuid, service_uuid])
-            db_service = get_object(self.database, ServiceModel, str_service_key, raise_if_not_found=False)
-            LOGGER.info('[UpdateService] before db_service = {:s}'.format(str(db_service.dump(
-                include_endpoint_ids=True, include_constraints=True, include_config_rules=True))))
-            db_devices : Dict[str, DeviceModel] = sync_devices_from_context(
-                self.context_client, self.database, db_service, request.service_endpoint_ids)
-            LOGGER.info('[UpdateService] db_devices[{:d}] = {:s}'.format(
-                len(db_devices), str({
-                    device_uuid:db_device.dump(include_config_rules=True, include_drivers=True, include_endpoints=True)
-                    for device_uuid,db_device in db_devices.items()
-                })))
-            sync_service_from_context(service_context_uuid, service_uuid, self.context_client, self.database)
-            db_service,_ = update_service_in_local_database(self.database, request)
-            LOGGER.info('[UpdateService] after db_service = {:s}'.format(str(db_service.dump(
-                include_endpoint_ids=True, include_constraints=True, include_config_rules=True))))
-            sync_service_to_context(db_service, self.context_client)
-        else:
-            for sub_service, sub_connections in connectivity.get('requirements', []):
-                for sub_connection in sub_connections:
-                    update_service(
-                        self.database, self.context_client, self.device_client, self.service_handler_factory,
-                        sub_service, sub_connection)
-
-            for connection in connectivity.get('connections'):
-                db_service = update_service(
-                    self.database, self.context_client, self.device_client, self.service_handler_factory,
-                    request, connection)
-
-            str_service_key = key_to_str([service_context_uuid, service_uuid])
-            db_service = get_object(self.database, ServiceModel, str_service_key, raise_if_not_found=False)
-            if db_service is None: raise NotFoundException('Service', str_service_key)
-
-        LOGGER.info('[UpdateService] db_service = {:s}'.format(str(db_service.dump(
-            include_endpoint_ids=True, include_constraints=True, include_config_rules=True))))
-
-        return ServiceId(**db_service.dump_id())
+        # Set service status to "SERVICESTATUS_PLANNED" to ensure rest of components are aware the service is
+        # being modified.
+        context_client = ContextClient()
+        _service : Optional[Service] = get_service(context_client, request.service_id)
+        service = Service()
+        service.CopyFrom(request if _service is None else _service)
+        service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
+        context_client.SetService(service)
+
+        num_disjoint_paths = None
+        for constraint in request.service_constraints:
+            if constraint.WhichOneof('constraint') == 'sla_availability':
+                num_disjoint_paths = constraint.sla_availability.num_disjoint_paths
+                break
+
+        tasks_scheduler = TasksScheduler(self.service_handler_factory)
+        if len(request.service_endpoint_ids) >= (2 if num_disjoint_paths is None else 4):
+            pathcomp_request = PathCompRequest()
+            pathcomp_request.services.append(request)
+
+            if num_disjoint_paths is None:
+                pathcomp_request.shortest_path.Clear()
+            else:
+                pathcomp_request.k_disjoint_path.num_disjoint = num_disjoint_paths
+
+            pathcomp = PathCompClient()
+            LOGGER.info('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request)))
+            pathcomp_reply = pathcomp.Compute(pathcomp_request)
+            LOGGER.info('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply)))
+
+            # Feed TaskScheduler with this path computation reply. TaskScheduler identifies inter-dependencies among
+            # the services and connections retrieved and produces a schedule of tasks (an ordered list of tasks to be
+            # executed) to implement the requested create/update operation.
+            tasks_scheduler.compose_from_pathcompreply(pathcomp_reply, is_delete=False)
+
+        tasks_scheduler.execute_all()
+        return request.service_id
 
     @safe_and_metered_rpc_method(METRICS, LOGGER)
     def DeleteService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[DeleteService] begin ; request = {:s}'.format(grpc_message_to_json_string(request)))
 
-        pce = PathComputationElement()
-        pce.load_topology(self.context_client)
-        pce.load_connectivity(self.context_client, request)
-        #pce.dump_topology_to_file('../data/topo.dot')
-        #pce.dump_connectivity_to_file('../data/conn-before.txt')
-        connectivity = pce.get_connectivity_from_service_id(request)
-        if connectivity is None: return Empty()
-        #pce.dump_connectivity_to_file('../data/conn-after.txt')
-
-        LOGGER.info('[DeleteService] connectivity = {:s}'.format(str(dump_connectivity(connectivity))))
-
-        for connection in connectivity.get('connections'):
-            delete_service(
-                self.database, self.context_client, self.device_client, self.service_handler_factory,
-                request, connection)
-
-        for sub_service, sub_connections in connectivity.get('requirements', []):
-            for sub_connection in sub_connections:
-                delete_service(
-                    self.database, self.context_client, self.device_client, self.service_handler_factory,
-                    sub_service.service_id, sub_connection)
-
+        context_client = ContextClient()
+
+        # Set service status to "SERVICESTATUS_PENDING_REMOVAL" to ensure rest of components are aware the service is
+        # being modified.
+        _service : Optional[Service] = get_service(context_client, request)
+        if _service is None: raise Exception('Service({:s}) not found'.format(grpc_message_to_json_string(request)))
+        service = Service()
+        service.CopyFrom(_service)
+        service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL
+        context_client.SetService(service)
+
+        # Feed TaskScheduler with this service and the sub-services and sub-connections related to this service.
+        # TaskScheduler identifies inter-dependencies among them and produces a schedule of tasks (an ordered list of
+        # tasks to be executed) to implement the requested delete operation.
+        tasks_scheduler = TasksScheduler(self.service_handler_factory)
+        tasks_scheduler.compose_from_service(service, is_delete=True)
+        tasks_scheduler.execute_all()
         return Empty()
diff --git a/src/service/service/Tools.py b/src/service/service/Tools.py
deleted file mode 100644
index 4386793c52a979cd0b3d86701a3476314857f3ac..0000000000000000000000000000000000000000
--- a/src/service/service/Tools.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import Any, Dict, List, Optional, Tuple
-from common.orm.Database import Database
-from common.orm.HighLevel import get_object, get_related_objects
-from common.orm.backend.Tools import key_to_str
-from common.proto.context_pb2 import (
-    ConfigRule, Connection, Constraint, EndPointId, Service, ServiceId, ServiceStatusEnum)
-from common.rpc_method_wrapper.ServiceExceptions import (
-    InvalidArgumentException, NotFoundException, OperationFailedException)
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-from .database.ConfigModel import (
-    ConfigModel, ConfigRuleModel, ORM_ConfigActionEnum, get_config_rules, grpc_config_rules_to_raw)
-from .database.ConstraintModel import ConstraintModel, ConstraintsModel, get_constraints, grpc_constraints_to_raw
-from .database.DatabaseDeviceTools import sync_device_from_context
-from .database.DatabaseServiceTools import (
-    delete_service_from_context, sync_service_from_context, sync_service_to_context, update_service_in_local_database)
-from .database.DeviceModel import DeviceModel, DriverModel
-from .database.EndPointModel import EndPointModel, grpc_endpointids_to_raw
-from .database.RelationModels import ServiceEndPointModel
-from .database.ServiceModel import ServiceModel
-from .service_handler_api._ServiceHandler import _ServiceHandler
-from .service_handler_api.FilterFields import FilterFieldEnum
-from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
-from .service_handler_api.Tools import (
-    check_errors_deleteconfig, check_errors_deleteconstraint, check_errors_deleteendpoint, check_errors_setconfig,
-    check_errors_setconstraint, check_errors_setendpoint)
-
-LOGGER = logging.getLogger(__name__)
-
-def sync_devices_from_context(
-        context_client : ContextClient, database : Database, db_service : Optional[ServiceModel],
-        service_endpoint_ids : List[EndPointId]
-    ) -> Dict[str, DeviceModel]:
-
-    required_device_uuids = set()
-    if db_service is not None:
-        db_endpoints = get_related_objects(db_service, ServiceEndPointModel, 'endpoint_fk')
-        for db_endpoint in db_endpoints:
-            db_device = get_object(database, DeviceModel, db_endpoint.device_fk, raise_if_not_found=False)
-            required_device_uuids.add(db_device.device_uuid)
-
-    for endpoint_id in service_endpoint_ids:
-        required_device_uuids.add(endpoint_id.device_id.device_uuid.uuid)
-
-    db_devices = {}
-    devices_not_found = set()
-    for device_uuid in required_device_uuids:
-        sync_device_from_context(device_uuid, context_client, database)
-        db_device = get_object(database, DeviceModel, device_uuid, raise_if_not_found=False)
-        if db_device is None:
-            devices_not_found.add(device_uuid)
-        else:
-            db_devices[device_uuid] = db_device
-
-    if len(devices_not_found) > 0:
-        extra_details = ['Devices({:s}) cannot be retrieved from Context'.format(str(devices_not_found))]
-        raise NotFoundException('Device', '...', extra_details=extra_details)
-
-    return db_devices
-
-def classify_config_rules(
-    db_service : ServiceModel, service_config_rules : List[ConfigRule],
-    resources_to_set: List[Tuple[str, Any]], resources_to_delete : List[Tuple[str, Any]]):
-
-    context_config_rules = get_config_rules(db_service.database, db_service.pk, 'running')
-    context_config_rules = {config_rule[1]: config_rule[2] for config_rule in context_config_rules}
-    #LOGGER.info('[classify_config_rules] context_config_rules = {:s}'.format(str(context_config_rules)))
-
-    request_config_rules = grpc_config_rules_to_raw(service_config_rules)
-    #LOGGER.info('[classify_config_rules] request_config_rules = {:s}'.format(str(request_config_rules)))
-
-    for config_rule in request_config_rules:
-        action, key, value = config_rule
-        if action == ORM_ConfigActionEnum.SET:
-            if (key not in context_config_rules) or (context_config_rules[key] != value):
-                resources_to_set.append((key, value))
-        elif action == ORM_ConfigActionEnum.DELETE:
-            if key in context_config_rules:
-                resources_to_delete.append((key, value))
-        else:
-            raise InvalidArgumentException('config_rule.action', str(action), extra_details=str(request_config_rules))
-
-    #LOGGER.info('[classify_config_rules] resources_to_set = {:s}'.format(str(resources_to_set)))
-    #LOGGER.info('[classify_config_rules] resources_to_delete = {:s}'.format(str(resources_to_delete)))
-
-def classify_constraints(
-    db_service : ServiceModel, service_constraints : List[Constraint],
-    constraints_to_set: List[Tuple[str, str]], constraints_to_delete : List[Tuple[str, str]]):
-
-    context_constraints = get_constraints(db_service.database, db_service.pk, 'running')
-    context_constraints = {constraint[0]: constraint[1] for constraint in context_constraints}
-    #LOGGER.info('[classify_constraints] context_constraints = {:s}'.format(str(context_constraints)))
-
-    request_constraints = grpc_constraints_to_raw(service_constraints)
-    #LOGGER.info('[classify_constraints] request_constraints = {:s}'.format(str(request_constraints)))
-
-    for constraint in request_constraints:
-        constraint_type, constraint_value = constraint
-        if constraint_type in context_constraints:
-            if context_constraints[constraint_type] != constraint_value:
-                constraints_to_set.append(constraint)
-        else:
-            constraints_to_set.append(constraint)
-        context_constraints.pop(constraint_type, None)
-
-    for constraint in context_constraints:
-        constraints_to_delete.append(constraint)
-
-    #LOGGER.info('[classify_constraints] constraints_to_set = {:s}'.format(str(constraints_to_set)))
-    #LOGGER.info('[classify_constraints] constraints_to_delete = {:s}'.format(str(constraints_to_delete)))
-
-def get_service_endpointids(db_service : ServiceModel) -> List[Tuple[str, str, Optional[str]]]:
-    db_endpoints : List[EndPointModel] = get_related_objects(db_service, ServiceEndPointModel, 'endpoint_fk')
-    endpoint_ids = [db_endpoint.dump_id() for db_endpoint in db_endpoints]
-    return [
-        (endpoint_id['device_id']['device_uuid']['uuid'], endpoint_id['endpoint_uuid']['uuid'],
-            endpoint_id.get('topology_id', {}).get('topology_uuid', {}).get('uuid', None))
-        for endpoint_id in endpoint_ids
-    ]
-
-def classify_endpointids(
-    db_service : ServiceModel, service_endpoint_ids : List[EndPointId],
-    endpointids_to_set: List[Tuple[str, str, Optional[str]]],
-    endpointids_to_delete : List[Tuple[str, str, Optional[str]]]):
-
-    context_endpoint_ids = get_service_endpointids(db_service)
-    #LOGGER.info('[classify_endpointids] context_endpoint_ids = {:s}'.format(str(context_endpoint_ids)))
-    context_endpoint_ids = set(context_endpoint_ids)
-    #LOGGER.info('[classify_endpointids] context_endpoint_ids = {:s}'.format(str(context_endpoint_ids)))
-
-    request_endpoint_ids = grpc_endpointids_to_raw(service_endpoint_ids)
-    #LOGGER.info('[classify_endpointids] request_endpoint_ids = {:s}'.format(str(request_endpoint_ids)))
-
-    if len(service_endpoint_ids) != 2: return
-    for endpoint_id in request_endpoint_ids:
-        #if endpoint_id not in context_endpoint_ids:
-        #    endpointids_to_set.append(endpoint_id)
-        #context_endpoint_ids.discard(endpoint_id)
-        endpointids_to_set.append(endpoint_id)
-
-    #for endpoint_id in context_endpoint_ids:
-    #    endpointids_to_delete.append(endpoint_id)
-
-    #LOGGER.info('[classify_endpointids] endpointids_to_set = {:s}'.format(str(endpointids_to_set)))
-    #LOGGER.info('[classify_endpointids] endpointids_to_delete = {:s}'.format(str(endpointids_to_delete)))
-
-def get_service_handler_class(
-    service_handler_factory : ServiceHandlerFactory, db_service : ServiceModel, db_devices : Dict[str, DeviceModel]
-    ) -> Optional[_ServiceHandler]:
-
-    str_service_key = db_service.pk
-    database = db_service.database
-
-    # Assume all devices involved in the service must support at least one driver in common
-    device_drivers = None
-    for _,db_device in db_devices.items():
-        db_driver_pks = db_device.references(DriverModel)
-        db_driver_names = [DriverModel(database, pk).driver.value for pk,_ in db_driver_pks]
-        if device_drivers is None:
-            device_drivers = set(db_driver_names)
-        else:
-            device_drivers.intersection_update(db_driver_names)
-
-    filter_fields = {
-        FilterFieldEnum.SERVICE_TYPE.value  : db_service.service_type.value,    # must be supported
-        FilterFieldEnum.DEVICE_DRIVER.value : device_drivers,                   # at least one must be supported
-    }
-
-    msg = 'Selecting service handler for service({:s}) with filter_fields({:s})...'
-    LOGGER.info(msg.format(str(str_service_key), str(filter_fields)))
-    service_handler_class = service_handler_factory.get_service_handler_class(**filter_fields)
-    msg = 'ServiceHandler({:s}) selected for service({:s}) with filter_fields({:s})...'
-    LOGGER.info(msg.format(str(service_handler_class.__name__), str(str_service_key), str(filter_fields)))
-    return service_handler_class
-
-def update_service(
-        database : Database, context_client : ContextClient, device_client : DeviceClient,
-        service_handler_factory : ServiceHandlerFactory, service : Service, connection : Connection
-    ) -> ServiceModel:
-
-    service_id = service.service_id
-    service_uuid = service_id.service_uuid.uuid
-    service_context_uuid = service_id.context_id.context_uuid.uuid
-    str_service_key = key_to_str([service_context_uuid, service_uuid])
-
-    # Sync before updating service to ensure we have devices, endpoints, constraints, and config rules to be
-    # set/deleted before actuallymodifying them in the local in-memory database.
-
-    sync_service_from_context(service_context_uuid, service_uuid, context_client, database)
-    db_service = get_object(database, ServiceModel, str_service_key, raise_if_not_found=False)
-    db_devices = sync_devices_from_context(context_client, database, db_service, service.service_endpoint_ids)
-
-    if db_service is None: db_service,_ = update_service_in_local_database(database, service)
-    LOGGER.info('[update_service] db_service = {:s}'.format(str(db_service.dump(
-        include_endpoint_ids=True, include_constraints=True, include_config_rules=True))))
-
-    resources_to_set    : List[Tuple[str, Any]] = [] # resource_key, resource_value
-    resources_to_delete : List[Tuple[str, Any]] = [] # resource_key, resource_value
-    classify_config_rules(db_service, service.service_config.config_rules, resources_to_set, resources_to_delete)
-
-    constraints_to_set    : List[Tuple[str, str]] = [] # constraint_type, constraint_value
-    constraints_to_delete : List[Tuple[str, str]] = [] # constraint_type, constraint_value
-    classify_constraints(db_service, service.service_constraints, constraints_to_set, constraints_to_delete)
-
-    endpointids_to_set    : List[Tuple[str, str, Optional[str]]] = [] # device_uuid, endpoint_uuid, topology_uuid
-    endpointids_to_delete : List[Tuple[str, str, Optional[str]]] = [] # device_uuid, endpoint_uuid, topology_uuid
-    classify_endpointids(db_service, service.service_endpoint_ids, endpointids_to_set, endpointids_to_delete)
-
-    service_handler_class = get_service_handler_class(service_handler_factory, db_service, db_devices)
-    service_handler_settings = {}
-    service_handler : _ServiceHandler = service_handler_class(
-        db_service, database, context_client, device_client, **service_handler_settings)
-
-    errors = []
-
-    if len(errors) == 0:
-        results_deleteendpoint = service_handler.DeleteEndpoint(endpointids_to_delete)
-        errors.extend(check_errors_deleteendpoint(endpointids_to_delete, results_deleteendpoint))
-
-    if len(errors) == 0:
-        results_deleteconstraint = service_handler.DeleteConstraint(constraints_to_delete)
-        errors.extend(check_errors_deleteconstraint(constraints_to_delete, results_deleteconstraint))
-
-    if len(errors) == 0:
-        results_deleteconfig = service_handler.DeleteConfig(resources_to_delete)
-        errors.extend(check_errors_deleteconfig(resources_to_delete, results_deleteconfig))
-
-    if len(errors) == 0:
-        results_setconfig = service_handler.SetConfig(resources_to_set)
-        errors.extend(check_errors_setconfig(resources_to_set, results_setconfig))
-
-    if len(errors) == 0:
-        results_setconstraint = service_handler.SetConstraint(constraints_to_set)
-        errors.extend(check_errors_setconstraint(constraints_to_set, results_setconstraint))
-
-    if len(errors) == 0:
-        results_setendpoint = service_handler.SetEndpoint(endpointids_to_set)
-        errors.extend(check_errors_setendpoint(endpointids_to_set, results_setendpoint))
-
-    if len(errors) > 0:
-        raise OperationFailedException('UpdateService', extra_details=errors)
-
-    LOGGER.info('[update_service] len(service.service_endpoint_ids) = {:d}'.format(len(service.service_endpoint_ids)))
-    if len(service.service_endpoint_ids) >= 2:
-        service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE
-
-    db_service,_ = update_service_in_local_database(database, service)
-    LOGGER.info('[update_service] db_service = {:s}'.format(str(db_service.dump(
-        include_endpoint_ids=True, include_constraints=True, include_config_rules=True))))
-
-    sync_service_to_context(db_service, context_client)
-    context_client.SetConnection(connection)
-    return db_service
-
-def delete_service(
-        database : Database, context_client : ContextClient, device_client : DeviceClient,
-        service_handler_factory : ServiceHandlerFactory, service_id : ServiceId, connection : Connection
-    ) -> None:
-
-    context_client.RemoveConnection(connection.connection_id)
-
-    service_uuid = service_id.service_uuid.uuid
-    service_context_uuid = service_id.context_id.context_uuid.uuid
-    str_service_key = key_to_str([service_context_uuid, service_uuid])
-
-    # Sync before updating service to ensure we have devices, endpoints, constraints, and config rules to be
-    # set/deleted before actuallymodifying them in the local in-memory database.
-
-    sync_service_from_context(service_context_uuid, service_uuid, context_client, database)
-    db_service : ServiceModel = get_object(database, ServiceModel, str_service_key, raise_if_not_found=False)
-    if db_service is None: return
-    LOGGER.info('[delete_service] db_service = {:s}'.format(str(db_service.dump(
-        include_endpoint_ids=True, include_constraints=True, include_config_rules=True))))
-
-    db_devices = sync_devices_from_context(context_client, database, db_service, [])
-
-    resources_to_delete : List[Tuple[str, str]] = [     # resource_key, resource_value
-        (config_rule[1], config_rule[2])
-        for config_rule in get_config_rules(db_service.database, db_service.pk, 'running')
-    ]
-
-    constraints_to_delete : List[Tuple[str, str]] = [   # constraint_type, constraint_value
-        (constraint[0], constraint[1])
-        for constraint in get_constraints(db_service.database, db_service.pk, 'running')
-    ]
-
-    # device_uuid, endpoint_uuid, topology_uuid
-    endpointids_to_delete : List[Tuple[str, str, Optional[str]]] = list(set(get_service_endpointids(db_service)))
-
-    service_handler_class = get_service_handler_class(service_handler_factory, db_service, db_devices)
-    service_handler_settings = {}
-    service_handler : _ServiceHandler = service_handler_class(
-        db_service, database, context_client, device_client, **service_handler_settings)
-
-    errors = []
-
-    if len(errors) == 0:
-        results_deleteendpoint = service_handler.DeleteEndpoint(endpointids_to_delete)
-        errors.extend(check_errors_deleteendpoint(endpointids_to_delete, results_deleteendpoint))
-
-    if len(errors) == 0:
-        results_deleteconstraint = service_handler.DeleteConstraint(constraints_to_delete)
-        errors.extend(check_errors_deleteconstraint(constraints_to_delete, results_deleteconstraint))
-
-    if len(errors) == 0:
-        results_deleteconfig = service_handler.DeleteConfig(resources_to_delete)
-        errors.extend(check_errors_deleteconfig(resources_to_delete, results_deleteconfig))
-
-    if len(errors) > 0:
-        raise OperationFailedException('DeleteService', extra_details=errors)
-
-    delete_service_from_context(db_service, context_client)
-
-    for db_service_endpoint_pk,_ in db_service.references(ServiceEndPointModel):
-        ServiceEndPointModel(database, db_service_endpoint_pk).delete()
-
-    db_running_config = ConfigModel(database, db_service.service_config_fk)
-    for db_config_rule_pk,_ in db_running_config.references(ConfigRuleModel):
-        ConfigRuleModel(database, db_config_rule_pk).delete()
-
-    db_running_constraints = ConstraintsModel(database, db_service.service_constraints_fk)
-    for db_constraint_pk,_ in db_running_constraints.references(ConstraintModel):
-        ConstraintModel(database, db_constraint_pk).delete()
-
-    db_service.delete()
-    db_running_config.delete()
-    db_running_constraints.delete()
diff --git a/src/service/service/__main__.py b/src/service/service/__main__.py
index 1a67a309ff19bda2bf3174c80dfb908e99f72d14..04cf00b06bff809f837833964a9e093f18888ac2 100644
--- a/src/service/service/__main__.py
+++ b/src/service/service/__main__.py
@@ -33,14 +33,16 @@ def main():
     global LOGGER # pylint: disable=global-statement
 
     log_level = get_log_level()
-    logging.basicConfig(level=log_level)
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
     LOGGER = logging.getLogger(__name__)
 
     wait_for_environment_variables([
-        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
-        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
-        get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_HOST     ),
-        get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.CONTEXT,  ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.CONTEXT,  ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.DEVICE,   ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.DEVICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
     signal.signal(signal.SIGINT,  signal_handler)
diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py
index 9d8f9ad28f69ef606bcf2e06e3a6a17514f016b4..98113ba30fb095a29a2142e592b7759d2634eab9 100644
--- a/src/service/service/service_handler_api/FilterFields.py
+++ b/src/service/service/service_handler_api/FilterFields.py
@@ -13,15 +13,30 @@
 # limitations under the License.
 
 from enum import Enum
-from service.service.database.ServiceModel import ORM_ServiceTypeEnum
-from service.service.database.DeviceModel import ORM_DeviceDriverEnum
+from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum
 
 class FilterFieldEnum(Enum):
     SERVICE_TYPE  = 'service_type'
     DEVICE_DRIVER = 'device_driver'
 
+SERVICE_TYPE_VALUES = {
+    ServiceTypeEnum.SERVICETYPE_UNKNOWN,
+    ServiceTypeEnum.SERVICETYPE_L3NM,
+    ServiceTypeEnum.SERVICETYPE_L2NM,
+    ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE,
+}
+
+DEVICE_DRIVER_VALUES = {
+    DeviceDriverEnum.DEVICEDRIVER_UNDEFINED,
+    DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG,
+    DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API,
+    DeviceDriverEnum.DEVICEDRIVER_P4,
+    DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY,
+    DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352,
+}
+
 # Map allowed filter fields to allowed values per Filter field. If no restriction (free text) None is specified
 FILTER_FIELD_ALLOWED_VALUES = {
-    FilterFieldEnum.SERVICE_TYPE.value  : {i.value for i in ORM_ServiceTypeEnum},
-    FilterFieldEnum.DEVICE_DRIVER.value : {i.value for i in ORM_DeviceDriverEnum},
+    FilterFieldEnum.SERVICE_TYPE.value  : SERVICE_TYPE_VALUES,
+    FilterFieldEnum.DEVICE_DRIVER.value : DEVICE_DRIVER_VALUES,
 }
diff --git a/src/service/service/service_handler_api/ServiceHandlerFactory.py b/src/service/service/service_handler_api/ServiceHandlerFactory.py
index 8b7223a95613a8b490862bb3dad091baf3b38388..09a56775d4f391d71fe5ac30f9be74430120e306 100644
--- a/src/service/service/service_handler_api/ServiceHandlerFactory.py
+++ b/src/service/service/service_handler_api/ServiceHandlerFactory.py
@@ -14,7 +14,9 @@
 
 import logging, operator
 from enum import Enum
-from typing import Any, Dict, Iterable, List, Set, Tuple
+from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
+from common.proto.context_pb2 import Device, Service
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from .Exceptions import (
     UnsatisfiedFilterException, UnsupportedServiceHandlerClassException, UnsupportedFilterFieldException,
@@ -91,3 +93,40 @@ class ServiceHandlerFactory:
         candidate_service_handler_classes = sorted(
             candidate_service_handler_classes.items(), key=operator.itemgetter(1), reverse=True)
         return candidate_service_handler_classes[0][0]
+
+def get_device_supported_drivers(device : Device) -> Set[int]:
+    return {device_driver for device_driver in device.device_drivers}
+
+def get_common_device_drivers(drivers_per_device : List[Set[int]]) -> Set[int]:
+    common_device_drivers = None
+    for device_drivers in drivers_per_device:
+        if common_device_drivers is None:
+            common_device_drivers = set(device_drivers)
+        else:
+            common_device_drivers.intersection_update(device_drivers)
+    if common_device_drivers is None: common_device_drivers = set()
+    return common_device_drivers
+
+def get_service_handler_class(
+    service_handler_factory : ServiceHandlerFactory, service : Service, connection_devices : Dict[str, Device]
+) -> Optional[_ServiceHandler]:
+
+    str_service_key = grpc_message_to_json_string(service.service_id)
+
+    # Assume all devices involved in the service's connection must support at least one driver in common
+    common_device_drivers = get_common_device_drivers([
+        get_device_supported_drivers(device)
+        for device in connection_devices.values()
+    ])
+
+    filter_fields = {
+        FilterFieldEnum.SERVICE_TYPE.value  : service.service_type,     # must be supported
+        FilterFieldEnum.DEVICE_DRIVER.value : common_device_drivers,    # at least one must be supported
+    }
+
+    MSG = 'Selecting service handler for service({:s}) with filter_fields({:s})...'
+    LOGGER.info(MSG.format(str(str_service_key), str(filter_fields)))
+    service_handler_class = service_handler_factory.get_service_handler_class(**filter_fields)
+    MSG = 'ServiceHandler({:s}) selected for service({:s}) with filter_fields({:s})...'
+    LOGGER.info(MSG.format(str(service_handler_class.__name__), str(str_service_key), str(filter_fields)))
+    return service_handler_class
diff --git a/src/service/service/service_handler_api/_ServiceHandler.py b/src/service/service/service_handler_api/_ServiceHandler.py
index e724ebcc986a1c5d205c2b77d9cb944d6faeb359..9cbe3f49e8594badf3b419b24154cb59a30a17bf 100644
--- a/src/service/service/service_handler_api/_ServiceHandler.py
+++ b/src/service/service/service_handler_api/_ServiceHandler.py
@@ -19,10 +19,12 @@ from device.client.DeviceClient import DeviceClient
 from service.service.database.ServiceModel import ServiceModel
 
 class _ServiceHandler:
-    def __init__(
-        self, db_service : ServiceModel, database : Database, context_client : ContextClient,
-        device_client : DeviceClient, **settings
-    ) -> None:
+    def __init__(self,
+                 db_service: ServiceModel,
+                 database: Database,
+                 context_client: ContextClient,
+                 device_client: DeviceClient,
+                 **settings) -> None:
         """ Initialize Driver.
             Parameters:
                 db_service
@@ -30,94 +32,129 @@ class _ServiceHandler:
                 database
                     The instance of the local in-memory database.
                 context_client
-                    An instance of context client to be used to retrieve information from the service and the devices.
+                    An instance of context client to be used to retrieve
+                    information from the service and the devices.
                 device_client
-                    An instance of device client to be used to configure the devices.
+                    An instance of device client to be used to configure
+                    the devices.
                 **settings
                     Extra settings required by the service handler.
         """
         raise NotImplementedError()
 
-    def SetEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]:
-        """ Set endpoints from a list.
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]],
+        connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        """ Create/Update service endpoints form a list.
             Parameters:
-                endpoints : List[Tuple[str, str, Optional[str]]]
-                    List of tuples, each containing a device_uuid, endpoint_uuid and, optionally, the topology_uuid
+                endpoints: List[Tuple[str, str, Optional[str]]]
+                    List of tuples, each containing a device_uuid,
+                    endpoint_uuid and, optionally, the topology_uuid
                     of the endpoint to be added.
+                connection_uuid : Optional[str]
+                    If specified, is the UUID of the connection this endpoint is associated to.
             Returns:
-                results : List[Union[bool, Exception]]
-                    List of results for endpoint changes requested. Return values must be in the same order than
-                    endpoints requested. If an endpoint is properly added, True must be retrieved; otherwise, the
-                    Exception that is raised during the processing must be retrieved.
+                results: List[Union[bool, Exception]]
+                    List of results for endpoint changes requested.
+                    Return values must be in the same order as the requested
+                    endpoints. If an endpoint is properly added, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
         """
         raise NotImplementedError()
 
-    def DeleteEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]:
-        """ Delete endpoints form a list.
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]],
+        connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        """ Delete service endpoints form a list.
             Parameters:
-                endpoints : List[Tuple[str, str, Optional[str]]]
-                    List of tuples, each containing a device_uuid, endpoint_uuid, and the topology_uuid of the endpoint
+                endpoints: List[Tuple[str, str, Optional[str]]]
+                    List of tuples, each containing a device_uuid,
+                    endpoint_uuid, and the topology_uuid of the endpoint
                     to be removed.
+                connection_uuid : Optional[str]
+                    If specified, is the UUID of the connection this endpoint is associated to.
             Returns:
-                results : List[Union[bool, Exception]]
-                    List of results for endpoint deletions requested. Return values must be in the same order than
-                    endpoints requested. If an endpoint is properly deleted, True must be retrieved; otherwise, the
-                    Exception that is raised during the processing must be retrieved.
+                results: List[Union[bool, Exception]]
+                    List of results for endpoint deletions requested.
+                    Return values must be in the same order as the requested
+                    endpoints. If an endpoint is properly deleted, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
         """
         raise NotImplementedError()
 
-    def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
-        """ Create/Update constraints.
+    def SetConstraint(self, constraints: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Create/Update service constraints.
             Parameters:
-                constraints : List[Tuple[str, Any]]
-                    List of tuples, each containing a constraint_type and the new constraint_value to be set.
+                constraints: List[Tuple[str, Any]]
+                    List of tuples, each containing a constraint_type and the
+                    new constraint_value to be set.
             Returns:
-                results : List[Union[bool, Exception]]
-                    List of results for constraint changes requested. Return values must be in the same order than
-                    constraints requested. If a constraint is properly set, True must be retrieved; otherwise, the
-                    Exception that is raised during the processing must be retrieved.
+                results: List[Union[bool, Exception]]
+                    List of results for constraint changes requested.
+                    Return values must be in the same order as the requested
+                    constraints. If a constraint is properly set, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
         """
         raise NotImplementedError()
 
-    def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
-        """ Delete constraints.
+    def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Delete service constraints.
             Parameters:
-                constraints : List[Tuple[str, Any]]
-                    List of tuples, each containing a constraint_type pointing to the constraint to be deleted, and a
-                    constraint_value containing possible additionally required values to locate the constraint to be
-                    removed.
+                constraints: List[Tuple[str, Any]]
+                    List of tuples, each containing a constraint_type pointing
+                    to the constraint to be deleted, and a constraint_value
+                    containing possible additionally required values to locate
+                    the constraint to be removed.
             Returns:
-                results : List[Union[bool, Exception]]
-                    List of results for constraint deletions requested. Return values must be in the same order than
-                    constraints requested. If a constraint is properly deleted, True must be retrieved; otherwise, the
-                    Exception that is raised during the processing must be retrieved.
+                results: List[Union[bool, Exception]]
+                    List of results for constraint deletions requested.
+                    Return values must be in the same order as the requested
+                    constraints. If a constraint is properly deleted, True must
+                    be returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
         """
         raise NotImplementedError()
 
-    def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
-        """ Create/Update configuration for a list of resources.
+    def SetConfig(self, resources: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Create/Update configuration for a list of service resources.
             Parameters:
-                resources : List[Tuple[str, Any]]
-                    List of tuples, each containing a resource_key pointing the resource to be modified, and a
-                    resource_value containing the new value to be set.
+                resources: List[Tuple[str, Any]]
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value
+                    containing the new value to be set.
             Returns:
-                results : List[Union[bool, Exception]]
-                    List of results for resource key changes requested. Return values must be in the same order than
-                    resource keys requested. If a resource is properly set, True must be retrieved; otherwise, the
-                    Exception that is raised during the processing must be retrieved.
+                results: List[Union[bool, Exception]]
+                    List of results for resource key changes requested.
+                    Return values must be in the same order as the requested
+                    resource keys. If a resource is properly set, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
         """
         raise NotImplementedError()
 
-    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
-        """ Delete configuration for a list of resources.
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Delete configuration for a list of service resources.
             Parameters:
-                resources : List[Tuple[str, Any]]
-                    List of tuples, each containing a resource_key pointing the resource to be modified, and a
-                    resource_value containing possible additionally required values to locate the value to be removed.
+                resources: List[Tuple[str, Any]]
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value containing
+                    possible additionally required values to locate the value
+                    to be removed.
             Returns:
-                results : List[Union[bool, Exception]]
-                    List of results for resource key deletions requested. Return values must be in the same order than
-                    resource keys requested. If a resource is properly deleted, True must be retrieved; otherwise, the
-                    Exception that is raised during the processing must be retrieved.
+                results: List[Union[bool, Exception]]
+                    List of results for resource key deletions requested.
+                    Return values must be in the same order as the requested
+                    resource keys. If a resource is properly deleted, True must
+                    be returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
         """
         raise NotImplementedError()
diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py
index 6abe4048fb6771efc0a44f11aa40fc7739a87648..89e717722d152ce978dca10a768119d9e9adaf1e 100644
--- a/src/service/service/service_handlers/__init__.py
+++ b/src/service/service/service_handlers/__init__.py
@@ -12,28 +12,36 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ..service_handler_api.FilterFields import FilterFieldEnum, ORM_DeviceDriverEnum, ORM_ServiceTypeEnum
+from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum
+from ..service_handler_api.FilterFields import FilterFieldEnum
+from .l2nm_emulated.L2NMEmulatedServiceHandler import L2NMEmulatedServiceHandler
 from .l3nm_emulated.L3NMEmulatedServiceHandler import L3NMEmulatedServiceHandler
 from .l3nm_openconfig.L3NMOpenConfigServiceHandler import L3NMOpenConfigServiceHandler
 from .tapi_tapi.TapiServiceHandler import TapiServiceHandler
 
 SERVICE_HANDLERS = [
+    (L2NMEmulatedServiceHandler, [
+        {
+            FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_L2NM,
+            FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_UNDEFINED,
+        }
+    ]),
     (L3NMEmulatedServiceHandler, [
         {
-            FilterFieldEnum.SERVICE_TYPE  : ORM_ServiceTypeEnum.L3NM,
-            FilterFieldEnum.DEVICE_DRIVER : ORM_DeviceDriverEnum.UNDEFINED,
+            FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_L3NM,
+            FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_UNDEFINED,
         }
     ]),
     (L3NMOpenConfigServiceHandler, [
         {
-            FilterFieldEnum.SERVICE_TYPE  : ORM_ServiceTypeEnum.L3NM,
-            FilterFieldEnum.DEVICE_DRIVER : ORM_DeviceDriverEnum.OPENCONFIG,
+            FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_L3NM,
+            FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG,
         }
     ]),
     (TapiServiceHandler, [
         {
-            FilterFieldEnum.SERVICE_TYPE  : ORM_ServiceTypeEnum.TAPI_CONNECTIVITY_SERVICE,
-            FilterFieldEnum.DEVICE_DRIVER : ORM_DeviceDriverEnum.TRANSPORT_API,
+            FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE,
+            FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API,
         }
     ]),
 ]
diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
new file mode 100644
index 0000000000000000000000000000000000000000..18a5aea29eb7c025372d00828feb127336e90102
--- /dev/null
+++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
@@ -0,0 +1,132 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, List
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from service.service.service_handler_api.AnyTreeTools import TreeNode
+
+def setup_config_rules(
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_settings : TreeNode, endpoint_settings : TreeNode
+) -> List[Dict]:
+
+    json_settings          : Dict = {} if service_settings  is None else service_settings.value
+    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+
+    mtu                 = json_settings.get('mtu',                 1450 )    # 1512
+    #address_families    = json_settings.get('address_families',    []   )    # ['IPV4']
+    #bgp_as              = json_settings.get('bgp_as',              0    )    # 65000
+    #bgp_route_target    = json_settings.get('bgp_route_target',    '0:0')    # 65000:333
+
+    router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
+    #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
+    sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
+    vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
+    #address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
+    #address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30
+    remote_router       = json_endpoint_settings.get('remote_router',       '0.0.0.0')  # '5.5.5.5'
+    circuit_id          = json_endpoint_settings.get('circuit_id',          '000'    )  # '111'
+
+    if_cirid_name         = '{:s}.{:s}'.format(endpoint_uuid, str(circuit_id))
+    network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
+    connection_point_id   = 'VC-1'
+
+    json_config_rules = [
+        json_config_rule_set(
+            '/network_instance[default]',
+            {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}),
+
+        json_config_rule_set(
+            '/network_instance[default]/protocols[OSPF]',
+            {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}),
+
+        json_config_rule_set(
+            '/network_instance[default]/protocols[STATIC]',
+            {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}),
+
+        json_config_rule_set(
+            '/network_instance[{:s}]'.format(network_instance_name),
+            {'name': network_instance_name, 'type': 'L2VSI'}),
+
+        json_config_rule_set(
+            '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index),
+            {'name': if_cirid_name, 'type': 'l2vlan', 'index': sub_interface_index, 'vlan_id': vlan_id}),
+
+        json_config_rule_set(
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
+            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}),
+
+        json_config_rule_set(
+            '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
+            {'name': network_instance_name, 'connection_point': connection_point_id, 'VC_ID': circuit_id,
+             'remote_system': remote_router}),
+    ]
+    return json_config_rules
+
+def teardown_config_rules(
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_settings : TreeNode, endpoint_settings : TreeNode
+) -> List[Dict]:
+
+    #json_settings          : Dict = {} if service_settings  is None else service_settings.value
+    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+
+    #mtu                 = json_settings.get('mtu',                 1450 )    # 1512
+    #address_families    = json_settings.get('address_families',    []   )    # ['IPV4']
+    #bgp_as              = json_settings.get('bgp_as',              0    )    # 65000
+    #bgp_route_target    = json_settings.get('bgp_route_target',    '0:0')    # 65000:333
+
+    router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
+    #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
+    sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
+    #vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
+    #address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
+    #address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30
+    #remote_router       = json_endpoint_settings.get('remote_router',       '0.0.0.0')  # '5.5.5.5'
+    circuit_id          = json_endpoint_settings.get('circuit_id',          '000'    )  # '111'
+
+    if_cirid_name         = '{:s}.{:s}'.format(endpoint_uuid, str(circuit_id))
+    network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
+    connection_point_id   = 'VC-1'
+
+    json_config_rules = [
+        json_config_rule_delete(
+            '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id),
+            {'name': network_instance_name, 'connection_point': connection_point_id}),
+
+        json_config_rule_delete(
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name),
+            {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}),
+
+        json_config_rule_delete(
+            '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index),
+            {'name': if_cirid_name, 'index': sub_interface_index}),
+
+        json_config_rule_delete(
+            '/network_instance[{:s}]'.format(network_instance_name),
+            {'name': network_instance_name}),
+
+        json_config_rule_delete(
+            '/network_instance[default]/protocols[STATIC]',
+            {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}),
+
+        json_config_rule_delete(
+            '/network_instance[default]/protocols[OSPF]',
+            {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}),
+
+        json_config_rule_delete(
+            '/network_instance[default]',
+            {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}),
+    ]
+    return json_config_rules
diff --git a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..19deabda3c8ddcd9f252098570ec07f82bef65a7
--- /dev/null
+++ b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py
@@ -0,0 +1,163 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import anytree, json, logging
+from typing import Any, List, Optional, Tuple, Union
+from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service
+from common.tools.object_factory.Device import json_device_id
+from common.type_checkers.Checkers import chk_length, chk_type
+from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+from .ConfigRules import setup_config_rules, teardown_config_rules
+
+LOGGER = logging.getLogger(__name__)
+
+class L2NMEmulatedServiceHandler(_ServiceHandler):
+    def __init__(   # pylint: disable=super-init-not-called
+        self, service : Service, task_executor : TaskExecutor, **settings
+    ) -> None:
+        self.__service = service
+        self.__task_executor = task_executor # pylint: disable=unused-private-member
+        self.__resolver = anytree.Resolver(pathattr='name')
+        self.__config = TreeNode('.')
+        for config_rule in service.service_config.config_rules:
+            action = config_rule.action
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            resource_key = config_rule.custom.resource_key
+            resource_value = config_rule.custom.resource_value
+            if action == ConfigActionEnum.CONFIGACTION_SET:
+                try:
+                    resource_value = json.loads(resource_value)
+                except: # pylint: disable=bare-except
+                    pass
+                set_subnode_value(self.__resolver, self.__config, resource_key, resource_value)
+            elif action == ConfigActionEnum.CONFIGACTION_DELETE:
+                delete_subnode(self.__resolver, self.__config, resource_key)
+
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
+        settings : TreeNode = get_subnode(self.__resolver, self.__config, '/settings', None)
+
+        results = []
+        for endpoint in endpoints:
+            try:
+                chk_type('endpoint', endpoint, (tuple, list))
+                chk_length('endpoint', endpoint, min_length=2, max_length=3)
+                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+
+                endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid)
+                endpoint_settings : TreeNode = get_subnode(self.__resolver, self.__config, endpoint_settings_uri, None)
+
+                json_config_rules = setup_config_rules(
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+
+                device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+                del device.device_config.config_rules[:]
+                for json_config_rule in json_config_rules:
+                    device.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                self.__task_executor.configure_device(device)
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint)))
+                results.append(e)
+
+        return results
+
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
+        settings : TreeNode = get_subnode(self.__resolver, self.__config, '/settings', None)
+
+        results = []
+        for endpoint in endpoints:
+            try:
+                chk_type('endpoint', endpoint, (tuple, list))
+                chk_length('endpoint', endpoint, min_length=2, max_length=3)
+                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+
+                endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid)
+                endpoint_settings : TreeNode = get_subnode(self.__resolver, self.__config, endpoint_settings_uri, None)
+
+                json_config_rules = teardown_config_rules(
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+
+                device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+                del device.device_config.config_rules[:]
+                for json_config_rule in json_config_rules:
+                    device.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                self.__task_executor.configure_device(device)
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint)))
+                results.append(e)
+
+        return results
+
+    def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                resource_key, resource_value = resource
+                resource_value = json.loads(resource_value)
+                set_subnode_value(self.__resolver, self.__config, resource_key, resource_value)
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
+
+    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                resource_key, _ = resource
+                delete_subnode(self.__resolver, self.__config, resource_key)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
diff --git a/src/service/service/service_handlers/l2nm_emulated/__init__.py b/src/service/service/service_handlers/l2nm_emulated/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/service/service/service_handlers/l2nm_emulated/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a5aff5884c72f1384666a223a3b07da6d4ae4ec
--- /dev/null
+++ b/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py
@@ -0,0 +1,249 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, List
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from service.service.service_handler_api.AnyTreeTools import TreeNode
+
+def setup_config_rules(
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_settings : TreeNode, endpoint_settings : TreeNode
+) -> List[Dict]:
+
+    json_settings          : Dict = {} if service_settings  is None else service_settings.value
+    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+
+    service_short_uuid        = service_uuid.split('-')[-1]
+    network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
+    network_interface_desc    = '{:s}-NetIf'.format(service_uuid)
+    network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)
+
+    mtu                 = json_settings.get('mtu',                 1450 )    # 1512
+    #address_families    = json_settings.get('address_families',    []   )    # ['IPV4']
+    bgp_as              = json_settings.get('bgp_as',              0    )    # 65000
+    bgp_route_target    = json_settings.get('bgp_route_target',    '0:0')    # 65000:333
+
+    #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
+    route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
+    sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
+    vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
+    address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
+    address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30
+    if_subif_name       = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
+
+    json_config_rules = [
+        json_config_rule_set(
+            '/network_instance[{:s}]'.format(network_instance_name), {
+                'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF',
+                'route_distinguisher': route_distinguisher,
+                #'router_id': router_id, 'address_families': address_families,
+        }),
+        json_config_rule_set(
+            '/interface[{:s}]'.format(endpoint_uuid), {
+                'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu,
+        }),
+        json_config_rule_set(
+            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
+                'name': endpoint_uuid, 'index': sub_interface_index,
+                'description': network_subinterface_desc, 'vlan_id': vlan_id,
+                'address_ip': address_ip, 'address_prefix': address_prefix,
+        }),
+        json_config_rule_set(
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
+                'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid,
+                'subinterface': sub_interface_index,
+        }),
+        json_config_rule_set(
+            '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
+                'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as,
+        }),
+        json_config_rule_set(
+            '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
+                'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
+                'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
+        }),
+        json_config_rule_set(
+            '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
+                network_instance_name), {
+                'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
+                'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
+        }),
+        json_config_rule_set(
+            '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
+                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
+        }),
+        json_config_rule_set(
+            '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
+                network_instance_name, bgp_route_target), {
+                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
+                'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
+        }),
+        json_config_rule_set(
+            '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
+                'policy_name': '{:s}_import'.format(network_instance_name),
+        }),
+        json_config_rule_set(
+            '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
+                network_instance_name, '3'), {
+                'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
+                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
+                'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
+        }),
+        json_config_rule_set(
+            # pylint: disable=duplicate-string-formatting-argument
+            '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
+                network_instance_name, network_instance_name), {
+                'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name),
+        }),
+        json_config_rule_set(
+            '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
+                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
+        }),
+        json_config_rule_set(
+            '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
+                network_instance_name, bgp_route_target), {
+                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
+                'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
+        }),
+        json_config_rule_set(
+            '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
+                'policy_name': '{:s}_export'.format(network_instance_name),
+        }),
+        json_config_rule_set(
+            '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
+                network_instance_name, '3'), {
+                'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
+                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
+                'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
+        }),
+        json_config_rule_set(
+            # pylint: disable=duplicate-string-formatting-argument
+            '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
+                network_instance_name, network_instance_name), {
+                'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name),
+        }),
+    ]
+
+    return json_config_rules
+
+def teardown_config_rules(
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_settings : TreeNode, endpoint_settings : TreeNode
+) -> List[Dict]:
+
+    json_settings          : Dict = {} if service_settings  is None else service_settings.value
+    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+
+    #mtu                 = json_settings.get('mtu',                 1450 )    # 1512
+    #address_families    = json_settings.get('address_families',    []   )    # ['IPV4']
+    #bgp_as              = json_settings.get('bgp_as',              0    )    # 65000
+    bgp_route_target    = json_settings.get('bgp_route_target',    '0:0')    # 65000:333
+
+    #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
+    #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
+    sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
+    vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
+    #address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
+    #address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30
+
+    if_subif_name             = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
+    service_short_uuid        = service_uuid.split('-')[-1]
+    network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
+    #network_interface_desc    = '{:s}-NetIf'.format(service_uuid)
+    #network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)
+
+    json_config_rules = [
+        json_config_rule_delete(
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
+                'name': network_instance_name, 'id': if_subif_name,
+        }),
+        json_config_rule_delete(
+            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
+                'name': endpoint_uuid, 'index': sub_interface_index,
+        }),
+        json_config_rule_delete(
+            '/interface[{:s}]'.format(endpoint_uuid), {
+                'name': endpoint_uuid,
+        }),
+        json_config_rule_delete(
+            '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
+                network_instance_name), {
+                'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
+                'address_family': 'IPV4',
+        }),
+        json_config_rule_delete(
+            '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
+                'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
+                'address_family': 'IPV4',
+        }),
+        json_config_rule_delete(
+            '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
+                'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP',
+        }),
+        json_config_rule_delete(
+            # pylint: disable=duplicate-string-formatting-argument
+            '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
+                network_instance_name, network_instance_name), {
+            'name': network_instance_name,
+        }),
+        json_config_rule_delete(
+            '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
+                network_instance_name, '3'), {
+                'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
+        }),
+        json_config_rule_delete(
+            '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
+                'policy_name': '{:s}_import'.format(network_instance_name),
+        }),
+        json_config_rule_delete(
+            '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
+                network_instance_name, bgp_route_target), {
+                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
+                'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
+        }),
+        json_config_rule_delete(
+            '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
+                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
+        }),
+        json_config_rule_delete(
+            # pylint: disable=duplicate-string-formatting-argument
+            '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
+                network_instance_name, network_instance_name), {
+                'name': network_instance_name,
+        }),
+        json_config_rule_delete(
+            '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
+                network_instance_name, '3'), {
+                'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
+        }),
+        json_config_rule_delete(
+            '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
+                'policy_name': '{:s}_export'.format(network_instance_name),
+        }),
+        json_config_rule_delete(
+            '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
+                network_instance_name, bgp_route_target), {
+                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
+                'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
+        }),
+        json_config_rule_delete(
+            '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
+                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
+        }),
+        json_config_rule_delete(
+            '/network_instance[{:s}]'.format(network_instance_name), {
+                'name': network_instance_name
+        }),
+    ]
+    return json_config_rules
diff --git a/src/service/service/service_handlers/l3nm_emulated/ConfigRulesOld.py b/src/service/service/service_handlers/l3nm_emulated/ConfigRulesOld.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b12049bae40829ed329c3509bdeaedbf55badb4
--- /dev/null
+++ b/src/service/service/service_handlers/l3nm_emulated/ConfigRulesOld.py
@@ -0,0 +1,109 @@
+
+                # json_endpoint_settings : Dict = endpoint_settings.value
+                # #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
+                # route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
+                # sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
+                # vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
+                # address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
+                # address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30
+                # if_subif_name       = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
+
+                # db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True)
+                # device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+                # json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True)
+                # json_device_config : Dict = json_device.setdefault('device_config', {})
+                # json_device_config_rules : List = json_device_config.setdefault('config_rules', [])
+                # json_device_config_rules.extend([
+                #     json_config_rule_set(
+                #         '/network_instance[{:s}]'.format(network_instance_name), {
+                #             'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF',
+                #             'route_distinguisher': route_distinguisher,
+                #             #'router_id': router_id, 'address_families': address_families,
+                #     }),
+                #     json_config_rule_set(
+                #         '/interface[{:s}]'.format(endpoint_uuid), {
+                #             'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu,
+                #     }),
+                #     json_config_rule_set(
+                #         '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
+                #             'name': endpoint_uuid, 'index': sub_interface_index,
+                #             'description': network_subinterface_desc, 'vlan_id': vlan_id,
+                #             'address_ip': address_ip, 'address_prefix': address_prefix,
+                #     }),
+                #     json_config_rule_set(
+                #         '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
+                #             'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid,
+                #             'subinterface': sub_interface_index,
+                #     }),
+                #     json_config_rule_set(
+                #         '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
+                #             'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as,
+                #     }),
+                #     json_config_rule_set(
+                #         '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
+                #             'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
+                #             'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
+                #     }),
+                #     json_config_rule_set(
+                #         '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
+                #             network_instance_name), {
+                #             'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
+                #             'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
+                #     }),
+                #     json_config_rule_set(
+                #         '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
+                #             'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
+                #     }),
+                #     json_config_rule_set(
+                #         '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
+                #             network_instance_name, bgp_route_target), {
+                #             'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
+                #             'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
+                #     }),
+                #     json_config_rule_set(
+                #         '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
+                #             'policy_name': '{:s}_import'.format(network_instance_name),
+                #     }),
+                #     json_config_rule_set(
+                #         '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
+                #             network_instance_name, '3'), {
+                #             'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
+                #             'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
+                #             'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
+                #     }),
+                #     json_config_rule_set(
+                #         # pylint: disable=duplicate-string-formatting-argument
+                #         '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
+                #             network_instance_name, network_instance_name), {
+                #             'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name),
+                #     }),
+                #     json_config_rule_set(
+                #         '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
+                #             'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
+                #     }),
+                #     json_config_rule_set(
+                #         '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
+                #             network_instance_name, bgp_route_target), {
+                #             'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
+                #             'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
+                #     }),
+                #     json_config_rule_set(
+                #         '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
+                #             'policy_name': '{:s}_export'.format(network_instance_name),
+                #     }),
+                #     json_config_rule_set(
+                #         '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
+                #             network_instance_name, '3'), {
+                #             'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
+                #             'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
+                #             'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
+                #     }),
+                #     json_config_rule_set(
+                #         # pylint: disable=duplicate-string-formatting-argument
+                #         '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
+                #             network_instance_name, network_instance_name), {
+                #             'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name),
+                #     }),
+                # ])
+                # self.__device_client.ConfigureDevice(Device(**json_device))
+                # results.append(True)
diff --git a/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py
index 316b2ef8739efadf3f9f40d76d4e698117cc505f..54fb52630c68154865513d3969cdee9a06848c01 100644
--- a/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py
+++ b/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py
@@ -13,188 +13,66 @@
 # limitations under the License.
 
 import anytree, json, logging
-from typing import Any, Dict, List, Optional, Tuple, Union
-from common.orm.Database import Database
-from common.orm.HighLevel import get_object
-from common.orm.backend.Tools import key_to_str
-from common.proto.context_pb2 import Device
-from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from typing import Any, List, Optional, Tuple, Union
+from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service
+from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_length, chk_type
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-from service.service.database.ConfigModel import ORM_ConfigActionEnum, get_config_rules
-from service.service.database.ContextModel import ContextModel
-from service.service.database.DeviceModel import DeviceModel
-from service.service.database.ServiceModel import ServiceModel
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+from .ConfigRules import setup_config_rules, teardown_config_rules
 
 LOGGER = logging.getLogger(__name__)
 
 class L3NMEmulatedServiceHandler(_ServiceHandler):
     def __init__(   # pylint: disable=super-init-not-called
-        self, db_service : ServiceModel, database : Database, context_client : ContextClient,
-        device_client : DeviceClient, **settings
+        self, service : Service, task_executor : TaskExecutor, **settings
     ) -> None:
-        self.__db_service = db_service
-        self.__database = database
-        self.__context_client = context_client # pylint: disable=unused-private-member
-        self.__device_client = device_client
-
-        self.__db_context : ContextModel = get_object(self.__database, ContextModel, self.__db_service.context_fk)
-        str_service_key = key_to_str([self.__db_context.context_uuid, self.__db_service.service_uuid])
-        db_config = get_config_rules(self.__database, str_service_key, 'running')
+        self.__service = service
+        self.__task_executor = task_executor # pylint: disable=unused-private-member
         self.__resolver = anytree.Resolver(pathattr='name')
         self.__config = TreeNode('.')
-        for action, resource_key, resource_value in db_config:
-            if action == ORM_ConfigActionEnum.SET:
+        for config_rule in service.service_config.config_rules:
+            action = config_rule.action
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            resource_key = config_rule.custom.resource_key
+            resource_value = config_rule.custom.resource_value
+            if action == ConfigActionEnum.CONFIGACTION_SET:
                 try:
                     resource_value = json.loads(resource_value)
                 except: # pylint: disable=bare-except
                     pass
                 set_subnode_value(self.__resolver, self.__config, resource_key, resource_value)
-            elif action == ORM_ConfigActionEnum.DELETE:
+            elif action == ConfigActionEnum.CONFIGACTION_DELETE:
                 delete_subnode(self.__resolver, self.__config, resource_key)
 
-    def SetEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]:
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
         chk_type('endpoints', endpoints, list)
         if len(endpoints) == 0: return []
 
-        service_uuid              = self.__db_service.service_uuid
-        service_short_uuid        = service_uuid.split('-')[-1]
-        network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
-        network_interface_desc    = '{:s}-NetIf'.format(service_uuid)
-        network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)
-
+        service_uuid = self.__service.service_id.service_uuid.uuid
         settings : TreeNode = get_subnode(self.__resolver, self.__config, '/settings', None)
-        if settings is None: raise Exception('Unable to retrieve service settings')
-        json_settings : Dict = settings.value
-        mtu                 = json_settings.get('mtu',                 1450 )    # 1512
-        #address_families    = json_settings.get('address_families',    []   )    # ['IPV4']
-        bgp_as              = json_settings.get('bgp_as',              0    )    # 65000
-        bgp_route_target    = json_settings.get('bgp_route_target',    '0:0')    # 65000:333
 
         results = []
         for endpoint in endpoints:
             try:
                 chk_type('endpoint', endpoint, (tuple, list))
                 chk_length('endpoint', endpoint, min_length=2, max_length=3)
-                if len(endpoint) == 2:
-                    device_uuid, endpoint_uuid = endpoint
-                else:
-                    device_uuid, endpoint_uuid, _ = endpoint # ignore topology_uuid by now
+                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
 
                 endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid)
                 endpoint_settings : TreeNode = get_subnode(self.__resolver, self.__config, endpoint_settings_uri, None)
-                if endpoint_settings is None:
-                    raise Exception('Unable to retrieve service settings for endpoint({:s})'.format(
-                        str(endpoint_settings_uri)))
-                json_endpoint_settings : Dict = endpoint_settings.value
-                #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
-                route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
-                sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
-                vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
-                address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
-                address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30
-                if_subif_name       = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
 
-                db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True)
-                json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True)
-                json_device_config : Dict = json_device.setdefault('device_config', {})
-                json_device_config_rules : List = json_device_config.setdefault('config_rules', [])
-                json_device_config_rules.extend([
-                    json_config_rule_set(
-                        '/network_instance[{:s}]'.format(network_instance_name), {
-                            'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF',
-                            'route_distinguisher': route_distinguisher,
-                            #'router_id': router_id, 'address_families': address_families,
-                    }),
-                    json_config_rule_set(
-                        '/interface[{:s}]'.format(endpoint_uuid), {
-                            'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu,
-                    }),
-                    json_config_rule_set(
-                        '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
-                            'name': endpoint_uuid, 'index': sub_interface_index,
-                            'description': network_subinterface_desc, 'vlan_id': vlan_id,
-                            'address_ip': address_ip, 'address_prefix': address_prefix,
-                    }),
-                    json_config_rule_set(
-                        '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
-                            'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid,
-                            'subinterface': sub_interface_index,
-                    }),
-                    json_config_rule_set(
-                        '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
-                            'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as,
-                    }),
-                    json_config_rule_set(
-                        '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
-                            'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
-                            'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
-                    }),
-                    json_config_rule_set(
-                        '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
-                            network_instance_name), {
-                            'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
-                            'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
-                    }),
-                    json_config_rule_set(
-                        '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
-                            'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                    }),
-                    json_config_rule_set(
-                        '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
-                            network_instance_name, bgp_route_target), {
-                            'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                            'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
-                    }),
-                    json_config_rule_set(
-                        '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
-                            'policy_name': '{:s}_import'.format(network_instance_name),
-                    }),
-                    json_config_rule_set(
-                        '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
-                            network_instance_name, '3'), {
-                            'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
-                            'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                            'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
-                    }),
-                    json_config_rule_set(
-                        # pylint: disable=duplicate-string-formatting-argument
-                        '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
-                            network_instance_name, network_instance_name), {
-                            'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name),
-                    }),
-                    json_config_rule_set(
-                        '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
-                            'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                    }),
-                    json_config_rule_set(
-                        '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
-                            network_instance_name, bgp_route_target), {
-                            'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                            'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
-                    }),
-                    json_config_rule_set(
-                        '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
-                            'policy_name': '{:s}_export'.format(network_instance_name),
-                    }),
-                    json_config_rule_set(
-                        '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
-                            network_instance_name, '3'), {
-                            'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
-                            'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                            'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
-                    }),
-                    json_config_rule_set(
-                        # pylint: disable=duplicate-string-formatting-argument
-                        '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
-                            network_instance_name, network_instance_name), {
-                            'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name),
-                    }),
-                ])
-                self.__device_client.ConfigureDevice(Device(**json_device))
+                json_config_rules = setup_config_rules(
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+
+                device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+                del device.device_config.config_rules[:]
+                for json_config_rule in json_config_rules:
+                    device.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                self.__task_executor.configure_device(device)
                 results.append(True)
             except Exception as e: # pylint: disable=broad-except
                 LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint)))
@@ -202,127 +80,33 @@ class L3NMEmulatedServiceHandler(_ServiceHandler):
 
         return results
 
-    def DeleteEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]:
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
         chk_type('endpoints', endpoints, list)
         if len(endpoints) == 0: return []
 
-        service_uuid              = self.__db_service.service_uuid
-        service_short_uuid        = service_uuid.split('-')[-1]
-        network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
-
+        service_uuid = self.__service.service_id.service_uuid.uuid
         settings : TreeNode = get_subnode(self.__resolver, self.__config, '/settings', None)
-        if settings is None: raise Exception('Unable to retrieve service settings')
-        json_settings : Dict = settings.value
-        bgp_route_target    = json_settings.get('bgp_route_target',    '0:0')    # 65000:333
 
         results = []
         for endpoint in endpoints:
             try:
                 chk_type('endpoint', endpoint, (tuple, list))
                 chk_length('endpoint', endpoint, min_length=2, max_length=3)
-                if len(endpoint) == 2:
-                    device_uuid, endpoint_uuid = endpoint
-                else:
-                    device_uuid, endpoint_uuid, _ = endpoint # ignore topology_uuid by now
+                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
 
                 endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid)
                 endpoint_settings : TreeNode = get_subnode(self.__resolver, self.__config, endpoint_settings_uri, None)
-                if endpoint_settings is None:
-                    raise Exception('Unable to retrieve service settings for endpoint({:s})'.format(
-                        str(endpoint_settings_uri)))
-                json_endpoint_settings : Dict = endpoint_settings.value
-                sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
-                vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
-                if_subif_name       = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
 
-                db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True)
-                json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True)
-                json_device_config : Dict = json_device.setdefault('device_config', {})
-                json_device_config_rules : List = json_device_config.setdefault('config_rules', [])
-                json_device_config_rules.extend([
-                    json_config_rule_delete(
-                        '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
-                            'name': network_instance_name, 'id': if_subif_name,
-                    }),
-                    json_config_rule_delete(
-                        '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
-                            'name': endpoint_uuid, 'index': sub_interface_index,
-                    }),
-                    json_config_rule_delete(
-                        '/interface[{:s}]'.format(endpoint_uuid), {
-                            'name': endpoint_uuid,
-                    }),
-                    json_config_rule_delete(
-                        '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
-                            network_instance_name), {
-                            'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
-                            'address_family': 'IPV4',
-                    }),
-                    json_config_rule_delete(
-                        '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
-                            'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
-                            'address_family': 'IPV4',
-                    }),
-                    json_config_rule_delete(
-                        '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
-                            'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP',
-                    }),
-                    json_config_rule_delete(
-                        # pylint: disable=duplicate-string-formatting-argument
-                        '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
-                            network_instance_name, network_instance_name), {
-                        'name': network_instance_name,
-                    }),
-                    json_config_rule_delete(
-                        '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
-                            network_instance_name, '3'), {
-                            'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
-                    }),
-                    json_config_rule_delete(
-                        '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
-                            'policy_name': '{:s}_import'.format(network_instance_name),
-                    }),
-                    json_config_rule_delete(
-                        '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
-                            network_instance_name, bgp_route_target), {
-                            'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                            'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
-                    }),
-                    json_config_rule_delete(
-                        '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
-                            'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                    }),
-                    json_config_rule_delete(
-                        # pylint: disable=duplicate-string-formatting-argument
-                        '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
-                            network_instance_name, network_instance_name), {
-                            'name': network_instance_name,
-                    }),
-                    json_config_rule_delete(
-                        '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
-                            network_instance_name, '3'), {
-                            'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
-                    }),
-                    json_config_rule_delete(
-                        '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
-                            'policy_name': '{:s}_export'.format(network_instance_name),
-                    }),
-                    json_config_rule_delete(
-                        '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
-                            network_instance_name, bgp_route_target), {
-                            'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                            'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
-                    }),
-                    json_config_rule_delete(
-                        '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
-                            'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                    }),
-                    json_config_rule_delete(
-                        '/network_instance[{:s}]'.format(network_instance_name), {
-                            'name': network_instance_name
-                    }),
-                ])
-                self.__device_client.ConfigureDevice(Device(**json_device))
+                json_config_rules = teardown_config_rules(
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+
+                device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+                del device.device_config.config_rules[:]
+                for json_config_rule in json_config_rules:
+                    device.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                self.__task_executor.configure_device(device)
                 results.append(True)
             except Exception as e: # pylint: disable=broad-except
                 LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint)))
diff --git a/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py b/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py
index 1249af0ae7944f09bd12f2fab4e6e78523320c06..14a5e111b18fa46fc7a7e89f213cb9fe68684f1b 100644
--- a/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py
+++ b/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py
@@ -14,58 +14,54 @@
 
 import anytree, json, logging
 from typing import Any, Dict, List, Optional, Tuple, Union
-from common.orm.Database import Database
 from common.orm.HighLevel import get_object
-from common.orm.backend.Tools import key_to_str
-from common.proto.context_pb2 import Device
+from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, Device, DeviceId, Service
 from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-from service.service.database.ConfigModel import ORM_ConfigActionEnum, get_config_rules
-from service.service.database.ContextModel import ContextModel
 from service.service.database.DeviceModel import DeviceModel
-from service.service.database.ServiceModel import ServiceModel
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
 
 LOGGER = logging.getLogger(__name__)
 
 class TapiServiceHandler(_ServiceHandler):
     def __init__(   # pylint: disable=super-init-not-called
-        self, db_service : ServiceModel, database : Database, context_client : ContextClient,
-        device_client : DeviceClient, **settings
+        self, service : Service, task_executor : TaskExecutor, **settings
     ) -> None:
-        self.__db_service = db_service
-        self.__database = database
-        self.__context_client = context_client # pylint: disable=unused-private-member
-        self.__device_client = device_client
-
-        self.__db_context : ContextModel = get_object(self.__database, ContextModel, self.__db_service.context_fk)
-        str_service_key = key_to_str([self.__db_context.context_uuid, self.__db_service.service_uuid])
-        db_config = get_config_rules(self.__database, str_service_key, 'running')
+        self.__service = service
+        self.__task_executor = task_executor # pylint: disable=unused-private-member
         self.__resolver = anytree.Resolver(pathattr='name')
         self.__config = TreeNode('.')
-        for action, resource_key, resource_value in db_config:
-            if action == ORM_ConfigActionEnum.SET:
+        for config_rule in service.service_config.config_rules:
+            action = config_rule.action
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            resource_key = config_rule.custom.resource_key
+            resource_value = config_rule.custom.resource_value
+            if action == ConfigActionEnum.CONFIGACTION_SET:
                 try:
                     resource_value = json.loads(resource_value)
                 except: # pylint: disable=bare-except
                     pass
                 set_subnode_value(self.__resolver, self.__config, resource_key, resource_value)
-            elif action == ORM_ConfigActionEnum.DELETE:
+            elif action == ConfigActionEnum.CONFIGACTION_DELETE:
                 delete_subnode(self.__resolver, self.__config, resource_key)
 
-    def SetEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]:
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        LOGGER.info('[SetEndpoint] endpoints={:s}'.format(str(endpoints)))
+        LOGGER.info('[SetEndpoint] connection_uuid={:s}'.format(str(connection_uuid)))
         chk_type('endpoints', endpoints, list)
         if len(endpoints) != 2: return []
 
-        service_uuid = self.__db_service.service_uuid
-        service_settings : TreeNode = get_subnode(self.__resolver, self.__config, 'settings', None)
-        if service_settings is None: raise Exception('Unable to settings for Service({:s})'.format(str(service_uuid)))
+        service_uuid = self.__service.service_id.service_uuid.uuid
+        settings : TreeNode = get_subnode(self.__resolver, self.__config, '/settings', None)
+        if settings is None: raise Exception('Unable to retrieve settings for Service({:s})'.format(str(service_uuid)))
 
-        json_settings : Dict = service_settings.value
-        capacity_value   = json_settings.get('capacity_value',   1)
+        json_settings : Dict = settings.value
+        capacity_value   = json_settings.get('capacity_value',   50.0)
         capacity_unit    = json_settings.get('capacity_unit',    'GHz')
         layer_proto_name = json_settings.get('layer_proto_name', 'PHOTONIC_MEDIA')
         layer_proto_qual = json_settings.get('layer_proto_qual', 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC')
@@ -74,46 +70,45 @@ class TapiServiceHandler(_ServiceHandler):
         results = []
         try:
             device_uuid = endpoints[0][0]
-            db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True)
-            json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True)
-            json_device_config : Dict = json_device.setdefault('device_config', {})
-            json_device_config_rules : List = json_device_config.setdefault('config_rules', [])
-            json_device_config_rules.extend([
-                json_config_rule_set('/service[{:s}]'.format(service_uuid), {
-                    'uuid'                    : service_uuid,
-                    'input_sip'               : endpoints[0][1],
-                    'output_sip'              : endpoints[1][1],
-                    'capacity_unit'           : capacity_unit,
-                    'capacity_value'          : capacity_value,
-                    'layer_protocol_name'     : layer_proto_name,
-                    'layer_protocol_qualifier': layer_proto_qual,
-                    'direction'               : direction,
-                }),
-            ])
-            self.__device_client.ConfigureDevice(Device(**json_device))
+            device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+            json_config_rule = json_config_rule_set('/service[{:s}]'.format(service_uuid), {
+                'uuid'                    : service_uuid,
+                'input_sip'               : endpoints[0][1],
+                'output_sip'              : endpoints[1][1],
+                'capacity_unit'           : capacity_unit,
+                'capacity_value'          : capacity_value,
+                'layer_protocol_name'     : layer_proto_name,
+                'layer_protocol_qualifier': layer_proto_qual,
+                'direction'               : direction,
+            })
+            del device.device_config.config_rules[:]
+            device.device_config.config_rules.append(ConfigRule(**json_config_rule))
+            self.__task_executor.configure_device(device)
             results.append(True)
         except Exception as e: # pylint: disable=broad-except
-            LOGGER.exception('Unable to SetEndpoint for Service({:s})'.format(str(service_uuid)))
+            LOGGER.exception('Unable to configure Service({:s})'.format(str(service_uuid)))
             results.append(e)
 
         return results
 
-    def DeleteEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]:
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        LOGGER.info('[DeleteEndpoint] endpoints={:s}'.format(str(endpoints)))
+        LOGGER.info('[DeleteEndpoint] connection_uuid={:s}'.format(str(connection_uuid)))
+
         chk_type('endpoints', endpoints, list)
         if len(endpoints) != 2: return []
 
-        service_uuid = self.__db_service.service_uuid
+        service_uuid = self.__service.service_id.service_uuid.uuid
         results = []
         try:
             device_uuid = endpoints[0][0]
-            db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True)
-            json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True)
-            json_device_config : Dict = json_device.setdefault('device_config', {})
-            json_device_config_rules : List = json_device_config.setdefault('config_rules', [])
-            json_device_config_rules.extend([
-                json_config_rule_delete('/service[{:s}]'.format(service_uuid), {'uuid': service_uuid})
-            ])
-            self.__device_client.ConfigureDevice(Device(**json_device))
+            device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+            json_config_rule = json_config_rule_delete('/service[{:s}]'.format(service_uuid), {'uuid': service_uuid})
+            del device.device_config.config_rules[:]
+            device.device_config.config_rules.append(ConfigRule(**json_config_rule))
+            self.__task_executor.configure_device(device)
             results.append(True)
         except Exception as e: # pylint: disable=broad-except
             LOGGER.exception('Unable to DeleteEndpoint for Service({:s})'.format(str(service_uuid)))
diff --git a/src/service/service/task_scheduler/ConnectionExpander.py b/src/service/service/task_scheduler/ConnectionExpander.py
new file mode 100644
index 0000000000000000000000000000000000000000..39c91b1ba7129d6915ab578f2e85b670049def04
--- /dev/null
+++ b/src/service/service/task_scheduler/ConnectionExpander.py
@@ -0,0 +1,66 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, List, Optional, Tuple
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.proto.context_pb2 import Connection, Empty, EndPointId, Link
+from context.client.ContextClient import ContextClient
+
+class ConnectionExpander:
+    def __init__(self) -> None:
+        self.context_client = ContextClient()
+        self.endpointkey_to_link : Dict[Tuple[str, str], Link] = dict()
+        self.refresh_links()
+    
+    def refresh_links(self) -> None:
+        links = self.context_client.ListLinks(Empty())
+        for link in links.links:
+            for link_endpoint_id in link.link_endpoint_ids:
+                device_uuid = link_endpoint_id.device_id.device_uuid.uuid
+                endpoint_uuid = link_endpoint_id.endpoint_uuid.uuid
+                endpoint_key = (device_uuid, endpoint_uuid)
+                self.endpointkey_to_link[endpoint_key] = link
+
+    def get_link_from_endpoint_id(self, endpoint_id : EndPointId, raise_if_not_found : bool = False) -> Optional[Link]:
+        device_uuid = endpoint_id.device_id.device_uuid.uuid
+        endpoint_uuid = endpoint_id.endpoint_uuid.uuid
+        endpoint_key = (device_uuid, endpoint_uuid)
+        link = self.endpointkey_to_link.get(endpoint_key)
+        if link is None and raise_if_not_found:
+            str_endpoint_id = grpc_message_to_json_string(endpoint_id)
+            raise Exception('Link for Endpoint({:s}) not found'.format(str_endpoint_id))
+        return link
+
+    def get_links(self, connection : Connection) -> List[Link]:
+        path_links = list()
+        last_link_uuid = None
+        for endpoint_id in connection.path_hops_endpoint_ids:
+            link = self.get_link_from_endpoint_id(endpoint_id, raise_if_not_found=True)
+            link_uuid = link.link_id.link_uuid.uuid
+            if last_link_uuid is None or last_link_uuid != link_uuid:
+                path_links.append(link)
+                last_link_uuid = link_uuid
+        return path_links
+
+    def get_endpoints_traversed(self, connection : Connection) -> List[EndPointId]:
+        path_endpoint_ids = list()
+        last_link_uuid = None
+        for endpoint_id in connection.path_hops_endpoint_ids:
+            link = self.get_link_from_endpoint_id(endpoint_id, raise_if_not_found=True)
+            link_uuid = link.link_id.link_uuid.uuid
+            if last_link_uuid is None or last_link_uuid != link_uuid:
+                for link_endpoint_id in link.link_endpoint_ids:
+                    path_endpoint_ids.append(link_endpoint_id)
+                last_link_uuid = link_uuid
+        return path_endpoint_ids
diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py
new file mode 100644
index 0000000000000000000000000000000000000000..416e1698f2432e22ae5cfe8e437570fc7d3c8880
--- /dev/null
+++ b/src/service/service/task_scheduler/TaskExecutor.py
@@ -0,0 +1,142 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+from typing import Any, Dict, Optional, Union
+from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId
+from common.rpc_method_wrapper.ServiceExceptions import NotFoundException
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory, get_service_handler_class
+from service.service.tools.ContextGetters import get_connection, get_device, get_service
+from service.service.tools.ObjectKeys import get_connection_key, get_device_key, get_service_key
+
+CacheableObject = Union[Connection, Device, Service]
+
+class CacheableObjectType(Enum):
+    CONNECTION = 'connection'
+    DEVICE     = 'device'
+    SERVICE    = 'service'
+
+class TaskExecutor:
+    def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None:
+        self._service_handler_factory = service_handler_factory
+        self._context_client = ContextClient()
+        self._device_client = DeviceClient()
+        self._grpc_objects_cache : Dict[str, CacheableObject] = dict()
+
+    @property
+    def service_handler_factory(self) -> ServiceHandlerFactory: return self._service_handler_factory
+
+    # ----- Common methods ---------------------------------------------------------------------------------------------
+
+    def _load_grpc_object(self, object_type : CacheableObjectType, object_key : str) -> Optional[CacheableObject]:
+        object_key = '{:s}:{:s}'.format(object_type.value, object_key)
+        return self._grpc_objects_cache.get(object_key)
+
+    def _store_grpc_object(self, object_type : CacheableObjectType, object_key : str, grpc_object) -> None:
+        object_key = '{:s}:{:s}'.format(object_type.value, object_key)
+        self._grpc_objects_cache[object_key] = grpc_object
+    
+    def _delete_grpc_object(self, object_type : CacheableObjectType, object_key : str) -> None:
+        object_key = '{:s}:{:s}'.format(object_type.value, object_key)
+        self._grpc_objects_cache.pop(object_key, None)
+
+    def _store_editable_grpc_object(
+        self, object_type : CacheableObjectType, object_key : str, grpc_class, grpc_ro_object
+    ) -> Any:
+        grpc_rw_object = grpc_class()
+        grpc_rw_object.CopyFrom(grpc_ro_object)
+        self._store_grpc_object(object_type, object_key, grpc_rw_object)
+        return grpc_rw_object
+
+    # ----- Connection-related methods ---------------------------------------------------------------------------------
+
+    def get_connection(self, connection_id : ConnectionId) -> Connection:
+        connection_key = get_connection_key(connection_id)
+        connection = self._load_grpc_object(CacheableObjectType.CONNECTION, connection_key)
+        if connection is None:
+            connection = get_connection(self._context_client, connection_id)
+            if connection is None: raise NotFoundException('Connection', connection_key)
+            connection : Connection = self._store_editable_grpc_object(
+                CacheableObjectType.CONNECTION, connection_key, Connection, connection)
+        return connection
+
+    def set_connection(self, connection : Connection) -> None:
+        connection_key = get_connection_key(connection.connection_id)
+        self._context_client.SetConnection(connection)
+        self._store_grpc_object(CacheableObjectType.CONNECTION, connection_key, connection)
+
+    def delete_connection(self, connection_id : ConnectionId) -> None:
+        connection_key = get_connection_key(connection_id)
+        self._context_client.RemoveConnection(connection_id)
+        self._delete_grpc_object(CacheableObjectType.CONNECTION, connection_key)
+
+    # ----- Device-related methods -------------------------------------------------------------------------------------
+
+    def get_device(self, device_id : DeviceId) -> Device:
+        device_key = get_device_key(device_id)
+        device = self._load_grpc_object(CacheableObjectType.DEVICE, device_key)
+        if device is None:
+            device = get_device(self._context_client, device_id)
+            if device is None: raise NotFoundException('Device', device_key)
+            device : Device = self._store_editable_grpc_object(
+                CacheableObjectType.DEVICE, device_key, Device, device)
+        return device
+
+    def configure_device(self, device : Device) -> None:
+        device_key = get_device_key(device.device_id)
+        self._device_client.ConfigureDevice(device)
+        self._store_grpc_object(CacheableObjectType.DEVICE, device_key, device)
+
+    def get_devices_from_connection(self, connection : Connection) -> Dict[str, Device]:
+        devices = dict()
+        for endpoint_id in connection.path_hops_endpoint_ids:
+            device = self.get_device(endpoint_id.device_id)
+            device_uuid = endpoint_id.device_id.device_uuid.uuid
+            if device is None: raise Exception('Device({:s}) not found'.format(str(device_uuid)))
+            devices[device_uuid] = device
+        return devices
+
+    # ----- Service-related methods ------------------------------------------------------------------------------------
+
+    def get_service(self, service_id : ServiceId) -> Service:
+        service_key = get_service_key(service_id)
+        service = self._load_grpc_object(CacheableObjectType.SERVICE, service_key)
+        if service is None:
+            service = get_service(self._context_client, service_id)
+            if service is None: raise NotFoundException('Service', service_key)
+            service : service = self._store_editable_grpc_object(
+                CacheableObjectType.SERVICE, service_key, Service, service)
+        return service
+
+    def set_service(self, service : Service) -> None:
+        service_key = get_service_key(service.service_id)
+        self._context_client.SetService(service)
+        self._store_grpc_object(CacheableObjectType.SERVICE, service_key, service)
+
+    def delete_service(self, service_id : ServiceId) -> None:
+        service_key = get_service_key(service_id)
+        self._context_client.RemoveService(service_id)
+        self._delete_grpc_object(CacheableObjectType.SERVICE, service_key)
+
+    # ----- Service Handler Factory ------------------------------------------------------------------------------------
+
+    def get_service_handler(
+        self, connection : Connection, service : Service, **service_handler_settings
+    ) -> _ServiceHandler:
+        connection_devices = self.get_devices_from_connection(connection)
+        service_handler_class = get_service_handler_class(self._service_handler_factory, service, connection_devices)
+        return service_handler_class(service, self, **service_handler_settings)
diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..de7e9eb7a70e683051e9d2fd906252713dcdba54
--- /dev/null
+++ b/src/service/service/task_scheduler/TaskScheduler.py
@@ -0,0 +1,210 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import graphlib, logging, queue, time
+from typing import Dict, Tuple
+from common.proto.context_pb2 import Connection, ConnectionId, Service, ServiceId, ServiceStatusEnum
+from common.proto.pathcomp_pb2 import PathCompReply
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
+from service.service.tools.ObjectKeys import get_connection_key, get_service_key
+from .tasks._Task import _Task
+from .tasks.Task_ConnectionConfigure import Task_ConnectionConfigure
+from .tasks.Task_ConnectionDeconfigure import Task_ConnectionDeconfigure
+from .tasks.Task_ServiceDelete import Task_ServiceDelete
+from .tasks.Task_ServiceSetStatus import Task_ServiceSetStatus
+from .TaskExecutor import CacheableObjectType, TaskExecutor
+
+LOGGER = logging.getLogger(__name__)
+
+class TasksScheduler:
+    def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None:
+        self._dag = graphlib.TopologicalSorter()
+        self._executor = TaskExecutor(service_handler_factory)
+        self._tasks : Dict[str, _Task] = dict()
+        self._context_client = ContextClient()
+
+    # ----- Helper methods ---------------------------------------------------------------------------------------------
+
+    def _add_task_if_not_exists(self, task : _Task) -> str:
+        task_key = task.key
+        if task_key not in self._tasks:
+            self._tasks[task_key] = task
+        return task_key
+
+    def _add_connection_to_executor_cache(self, connection : Connection) -> None:
+        connection_key = get_connection_key(connection.connection_id)
+        self._executor._store_editable_grpc_object(
+            CacheableObjectType.CONNECTION, connection_key, Connection, connection)
+
+    def _add_service_to_executor_cache(self, service : Service) -> None:
+        service_key = get_service_key(service.service_id)
+        self._executor._store_editable_grpc_object(
+            CacheableObjectType.SERVICE, service_key, Service, service)
+
+    # ----- Task & DAG composition methods -----------------------------------------------------------------------------
+
+    def _service_create(self, service_id : ServiceId) -> Tuple[str, str]:
+        service_planned_key = self._add_task_if_not_exists(Task_ServiceSetStatus(
+            self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_PLANNED))
+
+        service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus(
+            self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE))
+
+        # activating a service requires the service is in planning state
+        self._dag.add(service_active_key, service_planned_key)
+        return service_planned_key, service_active_key
+
+    def _service_remove(self, service_id : ServiceId) -> Tuple[str, str]:
+        service_removing_key = self._add_task_if_not_exists(Task_ServiceSetStatus(
+            self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL))
+
+        service_delete_key = self._add_task_if_not_exists(Task_ServiceDelete(self._executor, service_id))
+
+        # deleting a service requires the service is in removing state
+        self._dag.add(service_delete_key, service_removing_key)
+        return service_removing_key, service_delete_key
+
+    def _connection_configure(self, connection_id : ConnectionId, service_id : ServiceId) -> str:
+        connection_configure_key = self._add_task_if_not_exists(Task_ConnectionConfigure(
+            self._executor, connection_id))
+
+        # the connection configuration depends on its connection's service being in planning state
+        service_planned_key = self._add_task_if_not_exists(Task_ServiceSetStatus(
+            self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_PLANNED))
+        self._dag.add(connection_configure_key, service_planned_key)
+
+        # the connection's service depends on the connection configuration to transition to active state
+        service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus(
+            self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE))
+        self._dag.add(service_active_key, connection_configure_key)
+
+        return connection_configure_key
+
+    def _connection_deconfigure(self, connection_id : ConnectionId, service_id : ServiceId) -> str:
+        connection_deconfigure_key = self._add_task_if_not_exists(Task_ConnectionDeconfigure(
+            self._executor, connection_id))
+
+        # the connection deconfiguration depends on its connection's service being in removing state
+        service_pending_removal_key = self._add_task_if_not_exists(Task_ServiceSetStatus(
+            self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL))
+        self._dag.add(connection_deconfigure_key, service_pending_removal_key)
+
+        # the connection's service depends on the connection deconfiguration to transition to delete
+        service_delete_key = self._add_task_if_not_exists(Task_ServiceDelete(
+            self._executor, service_id))
+        self._dag.add(service_delete_key, connection_deconfigure_key)
+
+        return connection_deconfigure_key
+
+    def compose_from_pathcompreply(self, pathcomp_reply : PathCompReply, is_delete : bool = False) -> None:
+        t0 = time.time()
+        include_service = self._service_remove if is_delete else self._service_create
+        include_connection = self._connection_deconfigure if is_delete else self._connection_configure
+
+        for service in pathcomp_reply.services:
+            include_service(service.service_id)
+            self._add_service_to_executor_cache(service)
+
+        for connection in pathcomp_reply.connections:
+            connection_key = include_connection(connection.connection_id, connection.service_id)
+            self._add_connection_to_executor_cache(connection)
+            self._executor.get_service(connection.service_id)
+            for sub_service_id in connection.sub_service_ids:
+                _,service_key_done = include_service(sub_service_id)
+                self._executor.get_service(sub_service_id)
+                self._dag.add(connection_key, service_key_done)
+
+        t1 = time.time()
+        LOGGER.info('[compose_from_pathcompreply] elapsed_time: {:f} sec'.format(t1-t0))
+
+    def compose_from_service(self, service : Service, is_delete : bool = False) -> None:
+        t0 = time.time()
+        include_service = self._service_remove if is_delete else self._service_create
+        include_connection = self._connection_deconfigure if is_delete else self._connection_configure
+
+        explored_items = set()
+        pending_items_to_explore = queue.Queue()
+        pending_items_to_explore.put(service)
+
+        while not pending_items_to_explore.empty():
+            try:
+                item = pending_items_to_explore.get(block=False)
+            except queue.Empty:
+                break
+
+            if isinstance(item, Service):
+                str_item_key = grpc_message_to_json_string(item.service_id)
+                if str_item_key in explored_items: continue
+
+                include_service(item.service_id)
+                self._add_service_to_executor_cache(item)
+                connections = self._context_client.ListConnections(item.service_id)
+                for connection in connections.connections:
+                    self._add_connection_to_executor_cache(connection)
+                    pending_items_to_explore.put(connection)
+
+                explored_items.add(str_item_key)
+
+            elif isinstance(item, ServiceId):
+                str_item_key = grpc_message_to_json_string(item)
+                if str_item_key in explored_items: continue
+
+                include_service(item)
+                self._executor.get_service(item)
+                connections = self._context_client.ListConnections(item)
+                for connection in connections.connections:
+                    self._add_connection_to_executor_cache(connection)
+                    pending_items_to_explore.put(connection)
+
+                explored_items.add(str_item_key)
+
+            elif isinstance(item, Connection):
+                str_item_key = grpc_message_to_json_string(item.connection_id)
+                if str_item_key in explored_items: continue
+
+                connection_key = include_connection(item.connection_id, item.service_id)
+                self._add_connection_to_executor_cache(connection)
+
+                self._executor.get_service(item.service_id)
+                pending_items_to_explore.put(item.service_id)
+
+                for sub_service_id in item.sub_service_ids:
+                    _,service_key_done = include_service(sub_service_id)
+                    self._executor.get_service(sub_service_id)
+                    self._dag.add(service_key_done, connection_key)
+                    pending_items_to_explore.put(sub_service_id)
+
+                explored_items.add(str_item_key)
+
+            else:
+                MSG = 'Unsupported item {:s}({:s})'
+                raise Exception(MSG.format(type(item).__name__, grpc_message_to_json_string(item)))
+
+        t1 = time.time()
+        LOGGER.info('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0))
+
+    def execute_all(self, dry_run : bool = False) -> None:
+        ordered_task_keys = list(self._dag.static_order())
+        LOGGER.info('[execute_all] ordered_task_keys={:s}'.format(str(ordered_task_keys)))
+
+        results = []
+        for task_key in ordered_task_keys:
+            task = self._tasks.get(task_key)
+            succeeded = True if dry_run else task.execute()
+            results.append(succeeded)
+
+        LOGGER.info('[execute_all] results={:s}'.format(str(results)))
+        return zip(ordered_task_keys, results)
diff --git a/src/service/service/task_scheduler/__init__.py b/src/service/service/task_scheduler/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70bfa5118f47eb93d5cdd0832ee7928030369286
--- /dev/null
+++ b/src/service/service/task_scheduler/__init__.py
@@ -0,0 +1,51 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TaskScheduler is initialized with a PathComputation Reply or a Service, and it collects/identifies the sub-services,
+# sub-connections, and operations associated to them. It discovers and takes care of the inter-dependencies among them,
+# and produces an ordered list of tasks to be executed to implement the desired create/delete operation on the service.
+# E.g., a service cannot be deleted if connections supporting that service still exist. If these connections are
+# supported by sub-services, the connection needs to be torn down before destroying the services.
+#
+# Internally, it composes a Directed Acyclic Graph (DAG) of dependencies between tasks. Each task performs a specific
+# operation on a connection or service. The DAG composition is based on information extracted from a PathComp reply
+# and/or interrogating the Context component.
+#
+# Example:
+#   A        B        C
+#   *---L3---*---L3---*
+#    *--L0--* *--L0--*
+# - L3 service between A and C, depends on L3 connections A-B and B-C.
+# - Each L3 connection is supported by an L0 service and its corresponding L0 connection.
+#
+# Dependency structure:
+#   service L3:A-C
+#       connection L3:A-B
+#           service L0:A-B
+#               connection L0:A-B
+#       connection L3:B-C
+#           service L0:B-C
+#               connection L0:B-C
+#
+# Resolution:
+#    - service.set(L3:A-C, state=PLANNING)
+#    - service.set(L0:A-B, state=PLANNING)
+#    - connection.configure(L0:A-B)
+#    - service.set(L0:A-B, state=ACTIVE)
+#    - connection.configure(L3:A-B)
+#    - service.set(L0:B-C, state=PLANNING)
+#    - connection.configure(L0:B-C)
+#    - service.set(L0:B-C, state=ACTIVE)
+#    - connection.configure(L3:B-C)
+#    - service.set(L3:A-C, state=ACTIVE)
diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
new file mode 100644
index 0000000000000000000000000000000000000000..beb7e5a0426b7705dbf780d8305a587a3d4fec14
--- /dev/null
+++ b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
@@ -0,0 +1,59 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import ConnectionId
+from common.rpc_method_wrapper.ServiceExceptions import OperationFailedException
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from service.service.service_handler_api.Tools import check_errors_setendpoint
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+from service.service.tools.EndpointIdFormatters import endpointids_to_raw
+from service.service.tools.ObjectKeys import get_connection_key
+from ._Task import _Task
+
+KEY_TEMPLATE = 'connection({connection_id:s}):configure'
+
+class Task_ConnectionConfigure(_Task):
+    def __init__(self, task_executor : TaskExecutor, connection_id : ConnectionId) -> None:
+        super().__init__(task_executor)
+        self._connection_id = connection_id
+
+    @property
+    def connection_id(self) -> ConnectionId: return self._connection_id
+
+    @staticmethod
+    def build_key(connection_id : ConnectionId) -> str:
+        str_connection_id = get_connection_key(connection_id)
+        return KEY_TEMPLATE.format(connection_id=str_connection_id)
+
+    @property
+    def key(self) -> str: return self.build_key(self._connection_id)
+
+    def execute(self) -> None:
+        connection = self._task_executor.get_connection(self._connection_id)
+        service = self._task_executor.get_service(connection.service_id)
+
+        service_handler_settings = {}
+        service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings)
+
+        endpointids_to_set = endpointids_to_raw(connection.path_hops_endpoint_ids)
+        connection_uuid = connection.connection_id.connection_uuid.uuid
+        results_setendpoint = service_handler.SetEndpoint(endpointids_to_set, connection_uuid=connection_uuid)
+        errors = check_errors_setendpoint(endpointids_to_set, results_setendpoint)
+        if len(errors) > 0:
+            MSG = 'SetEndpoint for Connection({:s}) from Service({:s})'
+            str_connection = grpc_message_to_json_string(connection)
+            str_service = grpc_message_to_json_string(service)
+            raise OperationFailedException(MSG.format(str_connection, str_service), extra_details=errors)
+
+        self._task_executor.set_connection(connection)
diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
new file mode 100644
index 0000000000000000000000000000000000000000..c04d950a8993166c3bbfab3c083d4f2898dcd3e8
--- /dev/null
+++ b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
@@ -0,0 +1,59 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import ConnectionId
+from common.rpc_method_wrapper.ServiceExceptions import OperationFailedException
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from service.service.service_handler_api.Tools import check_errors_deleteendpoint
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+from service.service.tools.EndpointIdFormatters import endpointids_to_raw
+from service.service.tools.ObjectKeys import get_connection_key
+from ._Task import _Task
+
+KEY_TEMPLATE = 'connection({connection_id:s}):deconfigure'
+
+class Task_ConnectionDeconfigure(_Task):
+    def __init__(self, task_executor : TaskExecutor, connection_id : ConnectionId) -> None:
+        super().__init__(task_executor)
+        self._connection_id = connection_id
+
+    @property
+    def connection_id(self) -> ConnectionId: return self._connection_id
+
+    @staticmethod
+    def build_key(connection_id : ConnectionId) -> str:
+        str_connection_id = get_connection_key(connection_id)
+        return KEY_TEMPLATE.format(connection_id=str_connection_id)
+
+    @property
+    def key(self) -> str: return self.build_key(self._connection_id)
+
+    def execute(self) -> None:
+        connection = self._task_executor.get_connection(self._connection_id)
+        service = self._task_executor.get_service(connection.service_id)
+
+        service_handler_settings = {}
+        service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings)
+
+        endpointids_to_delete = endpointids_to_raw(connection.path_hops_endpoint_ids)
+        connection_uuid = connection.connection_id.connection_uuid.uuid
+        results_deleteendpoint = service_handler.DeleteEndpoint(endpointids_to_delete, connection_uuid=connection_uuid)
+        errors = check_errors_deleteendpoint(endpointids_to_delete, results_deleteendpoint)
+        if len(errors) > 0:
+            MSG = 'DeleteEndpoint for Connection({:s}) from Service({:s})'
+            str_connection = grpc_message_to_json_string(connection)
+            str_service = grpc_message_to_json_string(service)
+            raise OperationFailedException(MSG.format(str_connection, str_service), extra_details=errors)
+
+        self._task_executor.delete_connection(self._connection_id)
diff --git a/src/service/service/task_scheduler/tasks/Task_ServiceDelete.py b/src/service/service/task_scheduler/tasks/Task_ServiceDelete.py
new file mode 100644
index 0000000000000000000000000000000000000000..15da1ffedbb3235e6697dcd6c4b0c0429cad0450
--- /dev/null
+++ b/src/service/service/task_scheduler/tasks/Task_ServiceDelete.py
@@ -0,0 +1,39 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import ServiceId
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+from service.service.tools.ObjectKeys import get_service_key
+from ._Task import _Task
+
+KEY_TEMPLATE = 'service({service_id:s}):delete'
+
+class Task_ServiceDelete(_Task):
+    def __init__(self, task_executor : TaskExecutor, service_id : ServiceId) -> None:
+        super().__init__(task_executor)
+        self._service_id = service_id
+
+    @property
+    def service_id(self) -> ServiceId: return self._service_id
+
+    @staticmethod
+    def build_key(service_id : ServiceId) -> str:
+        str_service_id = get_service_key(service_id)
+        return KEY_TEMPLATE.format(service_id=str_service_id)
+
+    @property
+    def key(self) -> str: return self.build_key(self._service_id)
+
+    def execute(self) -> None:
+        self._task_executor.delete_service(self._service_id)
diff --git a/src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py b/src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py
new file mode 100644
index 0000000000000000000000000000000000000000..163954f1b786916ad8c5fde5e8a04def84af259b
--- /dev/null
+++ b/src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py
@@ -0,0 +1,46 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import ServiceId, ServiceStatusEnum
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+from service.service.tools.ObjectKeys import get_service_key
+from ._Task import _Task
+
+KEY_TEMPLATE = 'service({service_id:s}):set_status({new_status:s})'
+
+class Task_ServiceSetStatus(_Task):
+    def __init__(self, task_executor : TaskExecutor, service_id : ServiceId, new_status : ServiceStatusEnum) -> None:
+        super().__init__(task_executor)
+        self._service_id = service_id
+        self._new_status = new_status
+
+    @property
+    def service_id(self) -> ServiceId: return self._service_id
+
+    @property
+    def new_status(self) -> ServiceStatusEnum: return self._new_status
+
+    @staticmethod
+    def build_key(service_id : ServiceId, new_status : ServiceStatusEnum) -> str:
+        str_service_id = get_service_key(service_id)
+        str_new_status = ServiceStatusEnum.Name(new_status)
+        return KEY_TEMPLATE.format(service_id=str_service_id, new_status=str_new_status)
+
+    @property
+    def key(self) -> str: return self.build_key(self._service_id, self._new_status)
+
+    def execute(self) -> None:
+        service = self._task_executor.get_service(self._service_id)
+        service.service_status.service_status = self._new_status
+        self._task_executor.set_service(service)
diff --git a/src/service/service/task_scheduler/tasks/_Task.py b/src/service/service/task_scheduler/tasks/_Task.py
new file mode 100644
index 0000000000000000000000000000000000000000..c36f92973bfa3847c86d2d745792062ec828492f
--- /dev/null
+++ b/src/service/service/task_scheduler/tasks/_Task.py
@@ -0,0 +1,30 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+
+class _Task:
+    def __init__(self, task_executor : TaskExecutor) -> None:
+        self._task_executor = task_executor
+
+    @staticmethod
+    def build_key() -> str:
+        raise NotImplementedError('Task:build_key() not implemented')
+
+    @property
+    def key(self) -> str:
+        raise NotImplementedError('Task:key() not implemented')
+
+    def execute(self) -> bool:
+        raise NotImplementedError('Task:execute() not implemented')
diff --git a/src/service/service/task_scheduler/tasks/__init__.py b/src/service/service/task_scheduler/tasks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/service/service/task_scheduler/tasks/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/service/service/tools/ContextGetters.py b/src/service/service/tools/ContextGetters.py
new file mode 100644
index 0000000000000000000000000000000000000000..79ccf956b26e914bfbe6bdedd005d9f98e216d38
--- /dev/null
+++ b/src/service/service/tools/ContextGetters.py
@@ -0,0 +1,42 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc
+from typing import Optional
+from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId
+from context.client.ContextClient import ContextClient
+
+def get_connection(context_client : ContextClient, connection_id : ConnectionId) -> Optional[Connection]:
+    try:
+        connection : Connection = context_client.GetConnection(connection_id)
+        return connection
+    except grpc.RpcError as e:
+        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
+        return None
+
+def get_device(context_client : ContextClient, device_id : DeviceId) -> Optional[Device]:
+    try:
+        device : Device = context_client.GetDevice(device_id)
+        return device
+    except grpc.RpcError as e:
+        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
+        return None
+
+def get_service(context_client : ContextClient, service_id : ServiceId) -> Optional[Service]:
+    try:
+        service : Service = context_client.GetService(service_id)
+        return service
+    except grpc.RpcError as e:
+        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
+        return None
diff --git a/src/service/service/tools/EndpointIdFormatters.py b/src/service/service/tools/EndpointIdFormatters.py
new file mode 100644
index 0000000000000000000000000000000000000000..2435df42cfa10d336553945e7e70171838f69237
--- /dev/null
+++ b/src/service/service/tools/EndpointIdFormatters.py
@@ -0,0 +1,27 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional, Tuple
+from common.proto.context_pb2 import EndPointId
+
+def endpointids_to_raw(traversed_endpoint_ids : List[EndPointId]) -> List[Tuple[str, str, Optional[str]]]:
+    raw_endpoint_ids : List[Tuple[str, str, Optional[str]]] = []
+    for endpoint_id in traversed_endpoint_ids:
+        device_uuid   = endpoint_id.device_id.device_uuid.uuid
+        endpoint_uuid = endpoint_id.endpoint_uuid.uuid
+        topology_uuid = endpoint_id.topology_id.topology_uuid.uuid
+        if len(topology_uuid) == 0: topology_uuid = None
+        endpoint_id_tuple = device_uuid, endpoint_uuid, topology_uuid
+        raw_endpoint_ids.append(endpoint_id_tuple)
+    return raw_endpoint_ids
diff --git a/src/service/service/tools/ObjectKeys.py b/src/service/service/tools/ObjectKeys.py
new file mode 100644
index 0000000000000000000000000000000000000000..e58d8bd3e9e5c992a3b9be9c3275f3b40c7ba5e9
--- /dev/null
+++ b/src/service/service/tools/ObjectKeys.py
@@ -0,0 +1,26 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import ConnectionId, DeviceId, ServiceId
+
+def get_connection_key(connection_id : ConnectionId) -> str:
+    return connection_id.connection_uuid.uuid
+
+def get_device_key(device_id : DeviceId) -> str:
+    return device_id.device_uuid.uuid
+
+def get_service_key(service_id : ServiceId) -> str:
+    context_uuid = service_id.context_id.context_uuid.uuid
+    service_uuid = service_id.service_uuid.uuid
+    return '{:s}/{:s}'.format(context_uuid, service_uuid)
diff --git a/src/service/service/tools/__init__.py b/src/service/service/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/service/service/tools/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/service/tests/test_unitary_task_scheduler.py b/src/service/tests/test_unitary_task_scheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..020386d764ddc508d8fe6806ab1de6887620e33f
--- /dev/null
+++ b/src/service/tests/test_unitary_task_scheduler.py
@@ -0,0 +1,96 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+#from common.proto.context_pb2 import Connection, Service
+from common.proto.pathcomp_pb2 import PathCompReply
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
+from service.service.task_scheduler.TaskScheduler import TasksScheduler
+from .PrepareTestScenario import context_client # pylint: disable=unused-import
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+def test_task_scheduler():
+    # test: add services and connections that depend on each other
+    #       then, check if they are properly resolved.
+    # - service MAIN, depends on connection PKT-1, TAPI, and PKT-2
+    # - connection PKT-1, depends on nothing
+    # - connection TAPI, depends on service TAPI-1 and TAPI-2
+    # - connection PKT-2, depends on nothing
+    # - service TAPI-1, depends on connection TAPI-1
+    # - service TAPI-2, depends on connection TAPI-2
+
+    pathcomp_reply = PathCompReply()
+
+    service_main = pathcomp_reply.services.add()
+    service_main.service_id.context_id.context_uuid.uuid = 'admin'
+    service_main.service_id.service_uuid.uuid = 'MAIN'
+
+    service_tapi1 = pathcomp_reply.services.add()
+    service_tapi1.service_id.context_id.context_uuid.uuid = 'admin'
+    service_tapi1.service_id.service_uuid.uuid = 'TAPI-1'
+
+    service_tapi2 = pathcomp_reply.services.add()
+    service_tapi2.service_id.context_id.context_uuid.uuid = 'admin'
+    service_tapi2.service_id.service_uuid.uuid = 'TAPI-2'
+
+    connection_pkt1 = pathcomp_reply.connections.add()
+    connection_pkt1.connection_id.connection_uuid.uuid = 'PKT-1'
+    connection_pkt1.service_id.CopyFrom(service_main.service_id)
+
+    connection_tapi = pathcomp_reply.connections.add()
+    connection_tapi.connection_id.connection_uuid.uuid = 'TAPI'
+    connection_tapi.service_id.CopyFrom(service_main.service_id)
+
+    connection_pkt2 = pathcomp_reply.connections.add()
+    connection_pkt2.connection_id.connection_uuid.uuid = 'PKT-2'
+    connection_pkt2.service_id.CopyFrom(service_main.service_id)
+
+    connection_tapi1 = pathcomp_reply.connections.add()
+    connection_tapi1.connection_id.connection_uuid.uuid = 'TAPI-1'
+    connection_tapi1.service_id.CopyFrom(service_tapi1.service_id)
+    connection_tapi.sub_service_ids.append(service_tapi1.service_id)
+
+    connection_tapi2 = pathcomp_reply.connections.add()
+    connection_tapi2.connection_id.connection_uuid.uuid = 'TAPI-2'
+    connection_tapi2.service_id.CopyFrom(service_tapi2.service_id)
+    connection_tapi.sub_service_ids.append(service_tapi2.service_id)
+
+    LOGGER.info('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply)))
+
+    service_handler_factory = ServiceHandlerFactory([])
+    task_scheduler = TasksScheduler(service_handler_factory)
+    task_scheduler.compose_from_pathcompreply(pathcomp_reply)
+    tasks_and_results = list(task_scheduler.execute_all(dry_run=True))
+
+    LOGGER.info('tasks_and_results={:s}'.format(str(tasks_and_results)))
+
+    CORRECT_ORDERED_TASK_KEYS = [
+        'service(admin/MAIN):set_status(SERVICESTATUS_PLANNED)',
+        'service(admin/TAPI-1):set_status(SERVICESTATUS_PLANNED)',
+        'service(admin/TAPI-2):set_status(SERVICESTATUS_PLANNED)',
+        'connection(PKT-1):configure',
+        'connection(PKT-2):configure',
+        'connection(TAPI-1):configure',
+        'connection(TAPI-2):configure',
+        'service(admin/TAPI-1):set_status(SERVICESTATUS_ACTIVE)',
+        'service(admin/TAPI-2):set_status(SERVICESTATUS_ACTIVE)',
+        'connection(TAPI):configure',
+        'service(admin/MAIN):set_status(SERVICESTATUS_ACTIVE)'
+    ]
+
+    for (task_key,_),correct_key in zip(tasks_and_results, CORRECT_ORDERED_TASK_KEYS):
+        assert task_key == correct_key
diff --git a/src/tests/ecoc22/.gitignore b/src/tests/ecoc22/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0a3f4400d5c88b1af32c7667d69d2fdc12d5424e
--- /dev/null
+++ b/src/tests/ecoc22/.gitignore
@@ -0,0 +1,2 @@
+# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc.
+descriptors_real.json
diff --git a/src/tests/ecoc22/__init__.py b/src/tests/ecoc22/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/tests/ecoc22/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/ecoc22/deploy_specs.sh b/src/tests/ecoc22/deploy_specs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8afd683843d4882e75c3cbca8363aa3d63edda7f
--- /dev/null
+++ b/src/tests/ecoc22/deploy_specs.sh
@@ -0,0 +1,17 @@
+# Set the URL of your local Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Set the neew Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
diff --git a/src/tests/ecoc22/descriptors_emulated-BigNet.json b/src/tests/ecoc22/descriptors_emulated-BigNet.json
new file mode 100644
index 0000000000000000000000000000000000000000..cd038269425755258cea9b0908478d66702ad9cc
--- /dev/null
+++ b/src/tests/ecoc22/descriptors_emulated-BigNet.json
@@ -0,0 +1,1299 @@
+{
+    "contexts": [
+        {
+            "context_id": {
+                "context_uuid": {
+                    "uuid": "admin"
+                }
+            },
+            "service_ids": [],
+            "topology_ids": []
+        }
+    ],
+    "devices": [
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CE1"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CE2"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CE3"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CE4"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "PE1"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "PE2"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "PE3"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "PE4"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "BB1"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "BB2"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "BB6"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/4\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/5\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/6\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "BB7"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "BB3"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "BB5"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "BB4"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        }
+    ],
+    "links": [
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CE1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CE1/1/1==CE1/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CE2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CE2/1/1==CE2/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CE3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CE3/1/1==CE3/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CE4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CE4/1/1==CE4/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "PE1/2/1==PE1/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "PE1/2/2==PE1/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "PE2/2/1==PE2/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "PE2/2/2==PE2/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB5"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "PE3/2/2==PE3/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "PE3/2/1==PE3/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB5"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "PE4/2/2==PE4/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "PE4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "PE4/2/1==PE4/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB1/2/1==BB1/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB2/2/1==BB2/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB3/2/1==BB3/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB5"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB4/2/1==BB4/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB5"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB6"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB5/2/1==BB5/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB6"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB6/2/1==BB6/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB7"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB1/2/3==BB1/2/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB7"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB2/2/3==BB2/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB7"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB3/2/3==BB3/2/3"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB7"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/4"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB4/2/3==BB4/2/4"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB5"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB7"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/5"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB5/2/3==BB5/2/5"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB6"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "BB7"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/6"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "BB6/2/3==BB6/2/6"
+                }
+            }
+        }
+    ],
+    "topologies": [
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "admin"
+                }
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN.json b/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN.json
new file mode 100644
index 0000000000000000000000000000000000000000..5f40edac2feef134c02a74b08fcad21d917aae07
--- /dev/null
+++ b/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN.json
@@ -0,0 +1,1005 @@
+{
+    "contexts": [
+        {
+            "context_id": {
+                "context_uuid": {
+                    "uuid": "admin"
+                }
+            },
+            "service_ids": [],
+            "topology_ids": []
+        }
+    ],
+    "devices": [
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"int\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "DC1-GW"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-datacenter"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"int\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "DC2-GW"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-datacenter"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CS1-GW1"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CS1-GW2"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CS2-GW1"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CS2-GW2"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "TN-R1"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "TN-R2"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "TN-R3"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "TN-R4"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        }
+    ],
+    "links": [
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "DC1-GW"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "eth1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "10/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "DC1-GW/eth1==CS1-GW1/10/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "DC1-GW"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "eth2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "10/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "DC1-GW/eth2==CS1-GW2/10/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "DC2-GW"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "eth1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "10/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "DC2-GW/eth1==CS2-GW1/10/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "DC2-GW"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "eth2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "10/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "DC2-GW/eth2==CS2-GW2/10/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS1-GW1/1/1==TN-R1/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS1-GW2/1/1==TN-R2/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS1-GW1/1/2==TN-R2/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS1-GW2/1/2==TN-R1/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS2-GW1/1/1==TN-R3/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS2-GW2/1/1==TN-R4/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS2-GW1/1/2==TN-R4/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS2-GW2/1/2==TN-R3/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "TN-R1/2/1==TN-R2/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "TN-R2/2/1==TN-R3/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "TN-R3/2/1==TN-R4/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "TN-R4/2/1==TN-R1/2/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "TN-R1/2/3==TN-R3/2/3"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/3"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "TN-R2/2/3==TN-R4/2/3"
+                }
+            }
+        }
+    ],
+    "topologies": [
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "admin"
+                }
+            }
+        },
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "DC1"
+                }
+            }
+        },
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "DC2"
+                }
+            }
+        },
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "CS1"
+                }
+            }
+        },
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "CS2"
+                }
+            }
+        },
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "TN"
+                }
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN_OLS.json b/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN_OLS.json
new file mode 100644
index 0000000000000000000000000000000000000000..8d8e6fde3d3c183688fdc6ec7c3e6498c0d6791a
--- /dev/null
+++ b/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN_OLS.json
@@ -0,0 +1,985 @@
+{
+    "contexts": [
+        {
+            "context_id": {
+                "context_uuid": {
+                    "uuid": "admin"
+                }
+            },
+            "service_ids": [],
+            "topology_ids": []
+        }
+    ],
+    "devices": [
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"int\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "DC1-GW"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-datacenter"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"int\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "DC2-GW"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-datacenter"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CS1-GW1"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CS1-GW2"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CS2-GW1"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "CS2-GW2"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "TN-R1"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "TN-R2"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "TN-R3"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "TN-R4"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-packet-router"
+        },
+        {
+            "device_config": {
+                "config_rules": [
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/address",
+                            "resource_value": "127.0.0.1"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/port",
+                            "resource_value": "0"
+                        }
+                    },
+                    {
+                        "action": 1,
+                        "custom": {
+                            "resource_key": "_connect/settings",
+                            "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"a3adcbbcc03f\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"9329780033f5\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"e8a127ea3ed1\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"ef1c58823a49\"}]}"
+                        }
+                    }
+                ]
+            },
+            "device_drivers": [
+                0
+            ],
+            "device_endpoints": [],
+            "device_id": {
+                "device_uuid": {
+                    "uuid": "TN-OLS"
+                }
+            },
+            "device_operational_status": 1,
+            "device_type": "emu-open-line-system"
+        }
+    ],
+    "links": [
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "DC1-GW"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "eth1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "10/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "DC1-GW/eth1==CS1-GW1/10/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "DC1-GW"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "eth2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "10/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "DC1-GW/eth2==CS1-GW2/10/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "DC2-GW"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "eth1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "10/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "DC2-GW/eth1==CS2-GW1/10/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "DC2-GW"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "eth2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "10/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "DC2-GW/eth2==CS2-GW2/10/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS1-GW1/1/1==TN-R1/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS1-GW2/1/1==TN-R2/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS1-GW1/1/2==TN-R2/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS1-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS1-GW2/1/2==TN-R1/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS2-GW1/1/1==TN-R3/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS2-GW2/1/1==TN-R4/1/1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS2-GW1/1/2==TN-R4/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "CS2-GW2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "1/2"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "CS2-GW2/1/2==TN-R3/1/2"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R1"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-OLS"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "a3adcbbcc03f"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "TN-R1/2/1==TN-OLS/a3adcbbcc03f"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R2"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-OLS"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "9329780033f5"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "TN-R2/2/1==TN-OLS/9329780033f5"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R3"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-OLS"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "e8a127ea3ed1"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "TN-R3/2/1==TN-OLS/e8a127ea3ed1"
+                }
+            }
+        },
+        {
+            "link_endpoint_ids": [
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-R4"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "2/1"
+                    }
+                },
+                {
+                    "device_id": {
+                        "device_uuid": {
+                            "uuid": "TN-OLS"
+                        }
+                    },
+                    "endpoint_uuid": {
+                        "uuid": "ef1c58823a49"
+                    }
+                }
+            ],
+            "link_id": {
+                "link_uuid": {
+                    "uuid": "TN-R4/2/1==TN-OLS/ef1c58823a49"
+                }
+            }
+        }
+    ],
+    "topologies": [
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "admin"
+                }
+            }
+        },
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "DC1"
+                }
+            }
+        },
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "DC2"
+                }
+            }
+        },
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "CS1"
+                }
+            }
+        },
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "CS2"
+                }
+            }
+        },
+        {
+            "device_ids": [],
+            "link_ids": [],
+            "topology_id": {
+                "context_id": {
+                    "context_uuid": {
+                        "uuid": "admin"
+                    }
+                },
+                "topology_uuid": {
+                    "uuid": "TN"
+                }
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/src/tests/ecoc22/redeploy.sh b/src/tests/ecoc22/redeploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3f3986debb9aec57e7bc7f67b549b960679a987f
--- /dev/null
+++ b/src/tests/ecoc22/redeploy.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+source ecoc22/deploy_specs.sh
+./deploy.sh
+source tfs_runtime_env_vars.sh
diff --git a/src/tests/ecoc22/run_test_01_bootstrap.sh b/src/tests/ecoc22/run_test_01_bootstrap.sh
new file mode 100755
index 0000000000000000000000000000000000000000..819991d78a499c6d6e4a10e96f6439ee5b56ed8d
--- /dev/null
+++ b/src/tests/ecoc22/run_test_01_bootstrap.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars.sh
+pytest --verbose src/tests/ecoc22/tests/test_functional_bootstrap.py
diff --git a/src/tests/ecoc22/run_test_02_create_service.sh b/src/tests/ecoc22/run_test_02_create_service.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5a54d39d496e203ee669efda636067dcc1aa27a9
--- /dev/null
+++ b/src/tests/ecoc22/run_test_02_create_service.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars.sh
+pytest --verbose src/tests/ecoc22/tests/test_functional_create_service.py
diff --git a/src/tests/ecoc22/run_test_03_delete_service.sh b/src/tests/ecoc22/run_test_03_delete_service.sh
new file mode 100755
index 0000000000000000000000000000000000000000..900e09b658c1a73664dd28dc60ef6a50a9e68570
--- /dev/null
+++ b/src/tests/ecoc22/run_test_03_delete_service.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars.sh
+pytest --verbose src/tests/ecoc22/tests/test_functional_delete_service.py
diff --git a/src/tests/ecoc22/run_test_04_cleanup.sh b/src/tests/ecoc22/run_test_04_cleanup.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4e0622e6b22d470d842d99bb4202e23e88b72982
--- /dev/null
+++ b/src/tests/ecoc22/run_test_04_cleanup.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars.sh
+pytest --verbose src/tests/ecoc22/tests/test_functional_cleanup.py
diff --git a/src/tests/ecoc22/run_tests_and_coverage.sh b/src/tests/ecoc22/run_tests_and_coverage.sh
new file mode 100755
index 0000000000000000000000000000000000000000..835867896020f2b94e0797bdf60c85af2228eda2
--- /dev/null
+++ b/src/tests/ecoc22/run_tests_and_coverage.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+COVERAGEFILE=$PROJECTDIR/coverage/.coverage
+
+# Configure the correct folder on the .coveragerc file
+cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR+g > $RCFILE
+
+# Destroy old coverage file
+rm -f $COVERAGEFILE
+
+# Force a flush of Context database
+kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL
+
+# Run functional tests and analyze code coverage at the same time
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    tests/ecoc22/tests/test_functional_bootstrap.py
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    tests/ecoc22/tests/test_functional_create_service.py
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    tests/ecoc22/tests/test_functional_delete_service.py
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    tests/ecoc22/tests/test_functional_cleanup.py
diff --git a/src/tests/ecoc22/tests/.gitignore b/src/tests/ecoc22/tests/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..6b97d6fe3ad32f39097745229ab7f547f26ecb12
--- /dev/null
+++ b/src/tests/ecoc22/tests/.gitignore
@@ -0,0 +1 @@
+# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc.
diff --git a/src/tests/ecoc22/tests/BuildDescriptors.py b/src/tests/ecoc22/tests/BuildDescriptors.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0075c0639c70092ed60bafd06c9f62b581faa33
--- /dev/null
+++ b/src/tests/ecoc22/tests/BuildDescriptors.py
@@ -0,0 +1,71 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Execution:
+# $ cd src
+# $ python -m tests.ecoc22.tests.BuildDescriptors dc-csgw-tn
+# $ python -m tests.ecoc22.tests.BuildDescriptors dc-csgw-tn-ols
+# $ python -m tests.ecoc22.tests.BuildDescriptors bignet
+
+import copy, json, os, sys
+from enum import Enum
+from typing import Dict, Tuple
+
+class Scenario(Enum):
+    BIGNET         = 'bignet'
+    DC_CSGW_TN     = 'dc-csgw-tn'
+    DC_CSGW_TN_OLS = 'dc-csgw-tn-ols'
+
+scenario = None if len(sys.argv) < 2 else sys.argv[1].lower()
+
+if scenario == Scenario.BIGNET.value:
+    from .Objects_BigNet import CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+    FILENAME = 'tests/ecoc22/descriptors_emulated-BigNet.json'
+elif scenario == Scenario.DC_CSGW_TN.value:
+    os.environ['ADD_CONNECT_RULES_TO_DEVICES'] = 'TRUE'
+    from .Objects_DC_CSGW_TN import CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+    FILENAME = 'tests/ecoc22/descriptors_emulated-DC_CSGW_TN.json'
+elif scenario == Scenario.DC_CSGW_TN_OLS.value:
+    os.environ['ADD_CONNECT_RULES_TO_DEVICES'] = 'TRUE'
+    from .Objects_DC_CSGW_TN_OLS import CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+    FILENAME = 'tests/ecoc22/descriptors_emulated-DC_CSGW_TN_OLS.json'
+else:
+    scenarios = str([s.value for s in Scenario])
+    raise Exception('Unsupported Scenario({:s}), choices are: {:s}'.format(scenario, scenarios))
+
+def main():
+    with open(FILENAME, 'w', encoding='UTF-8') as f:
+        devices = []
+        for item in DEVICES:
+            if isinstance(item, Dict):
+                device = item
+            elif isinstance(item, Tuple) and len(item) == 2:
+                device,connect_rules = item
+            else:
+                raise Exception('Wrongly formatted item: {:s}'.format(str(item)))
+            device = copy.deepcopy(device)
+            if len(item) == 2:
+                device['device_config']['config_rules'].extend(connect_rules)
+            devices.append(device)
+
+        f.write(json.dumps({
+            'contexts': CONTEXTS,
+            'topologies': TOPOLOGIES,
+            'devices': devices,
+            'links': LINKS
+        }, sort_keys=True, indent=4))
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/tests/ecoc22/tests/Credentials.py b/src/tests/ecoc22/tests/Credentials.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/tests/ecoc22/tests/Fixtures.py b/src/tests/ecoc22/tests/Fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..70b41bdcb159552daa3dcf0c041a3713e2d1c821
--- /dev/null
+++ b/src/tests/ecoc22/tests/Fixtures.py
@@ -0,0 +1,26 @@
+import pytest
+from common.Settings import get_setting
+from compute.tests.mock_osm.MockOSM import MockOSM
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+#from .Objects_BigNet import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME
+from .Objects_DC_CSGW_TN import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME
+#from .Objects_DC_CSGW_TN_OLS import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def osm_wim():
+    wim_url = 'http://{:s}:{:s}'.format(
+        get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP')))
+    return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
diff --git a/src/tests/ecoc22/tests/LoadDescriptors.py b/src/tests/ecoc22/tests/LoadDescriptors.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd7e48366795d47624f1b8e295cbe6fa105bf8c7
--- /dev/null
+++ b/src/tests/ecoc22/tests/LoadDescriptors.py
@@ -0,0 +1,38 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, sys
+from common.Settings import get_setting
+from context.client.ContextClient import ContextClient
+from common.proto.context_pb2 import Context, Device, Link, Topology
+from device.client.DeviceClient import DeviceClient
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+def main():
+    context_client = ContextClient()
+    device_client  = DeviceClient()
+
+    with open('tests/ecoc22/descriptors.json', 'r', encoding='UTF-8') as f:
+        descriptors = json.loads(f.read())
+
+    for context  in descriptors['contexts'  ]: context_client.SetContext (Context (**context ))
+    for topology in descriptors['topologies']: context_client.SetTopology(Topology(**topology))
+    for device   in descriptors['devices'   ]: device_client .AddDevice  (Device  (**device  ))
+    for link     in descriptors['links'     ]: context_client.SetLink    (Link    (**link    ))
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/tests/ecoc22/tests/Objects_BigNet.py b/src/tests/ecoc22/tests/Objects_BigNet.py
new file mode 100644
index 0000000000000000000000000000000000000000..592376ff9dbaebbf4d8d02b04189e5d4f24584e3
--- /dev/null
+++ b/src/tests/ecoc22/tests/Objects_BigNet.py
@@ -0,0 +1,302 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.tools.object_factory.Context import json_context, json_context_id
+from common.tools.object_factory.Device import (
+    json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
+    json_device_emulated_packet_router_disabled, json_device_id)
+from common.tools.object_factory.Topology import json_topology, json_topology_id
+from .Tools import compose_bearer, compose_service_endpoint_id, json_endpoint_ids, link
+
+# ----- Context --------------------------------------------------------------------------------------------------------
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
+CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+
+
+# ----- Topology -------------------------------------------------------------------------------------------------------
+TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+
+
+# ----- Customer Equipment (CE) Devices --------------------------------------------------------------------------------
+DEVICE_CE1_UUID          = 'CE1'
+DEVICE_CE1_ENDPOINT_DEFS = [('1/1', 'copper', [])]
+DEVICE_CE1_ID            = json_device_id(DEVICE_CE1_UUID)
+DEVICE_CE1_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_CE1_ID, DEVICE_CE1_ENDPOINT_DEFS)
+DEVICE_CE1               = json_device_emulated_packet_router_disabled(DEVICE_CE1_UUID)
+ENDPOINT_ID_CE1_1_1      = DEVICE_CE1_ENDPOINT_IDS[0]
+DEVICE_CE1_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_CE1_ENDPOINT_DEFS)
+
+DEVICE_CE2_UUID          = 'CE2'
+DEVICE_CE2_ENDPOINT_DEFS = [('1/1', 'copper', [])]
+DEVICE_CE2_ID            = json_device_id(DEVICE_CE2_UUID)
+DEVICE_CE2_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_CE2_ID, DEVICE_CE2_ENDPOINT_DEFS)
+DEVICE_CE2               = json_device_emulated_packet_router_disabled(DEVICE_CE2_UUID)
+ENDPOINT_ID_CE2_1_1      = DEVICE_CE2_ENDPOINT_IDS[0]
+DEVICE_CE2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_CE2_ENDPOINT_DEFS)
+
+DEVICE_CE3_UUID          = 'CE3'
+DEVICE_CE3_ENDPOINT_DEFS = [('1/1', 'copper', [])]
+DEVICE_CE3_ID            = json_device_id(DEVICE_CE3_UUID)
+DEVICE_CE3_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_CE3_ID, DEVICE_CE3_ENDPOINT_DEFS)
+DEVICE_CE3               = json_device_emulated_packet_router_disabled(DEVICE_CE3_UUID)
+ENDPOINT_ID_CE3_1_1      = DEVICE_CE3_ENDPOINT_IDS[0]
+DEVICE_CE3_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_CE3_ENDPOINT_DEFS)
+
+DEVICE_CE4_UUID          = 'CE4'
+DEVICE_CE4_ENDPOINT_DEFS = [('1/1', 'copper', [])]
+DEVICE_CE4_ID            = json_device_id(DEVICE_CE4_UUID)
+DEVICE_CE4_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_CE4_ID, DEVICE_CE4_ENDPOINT_DEFS)
+DEVICE_CE4               = json_device_emulated_packet_router_disabled(DEVICE_CE4_UUID)
+ENDPOINT_ID_CE4_1_1      = DEVICE_CE4_ENDPOINT_IDS[0]
+DEVICE_CE4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_CE4_ENDPOINT_DEFS)
+
+# ----- Provider Equipment (PE) Devices --------------------------------------------------------------------------------
+DEVICE_PE1_UUID          = 'PE1'
+DEVICE_PE1_ENDPOINT_DEFS = [('1/1', 'copper', []),
+                            ('2/1', 'copper', []), ('2/2', 'copper', [])]
+DEVICE_PE1_ID            = json_device_id(DEVICE_PE1_UUID)
+DEVICE_PE1_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_PE1_ID, DEVICE_PE1_ENDPOINT_DEFS)
+DEVICE_PE1               = json_device_emulated_packet_router_disabled(DEVICE_PE1_UUID)
+ENDPOINT_ID_PE1_1_1      = DEVICE_PE1_ENDPOINT_IDS[0]
+ENDPOINT_ID_PE1_2_1      = DEVICE_PE1_ENDPOINT_IDS[1]
+ENDPOINT_ID_PE1_2_2      = DEVICE_PE1_ENDPOINT_IDS[2]
+DEVICE_PE1_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_PE1_ENDPOINT_DEFS)
+
+DEVICE_PE2_UUID          = 'PE2'
+DEVICE_PE2_ENDPOINT_DEFS = [('1/1', 'copper', []),
+                            ('2/1', 'copper', []), ('2/2', 'copper', [])]
+DEVICE_PE2_ID            = json_device_id(DEVICE_PE2_UUID)
+DEVICE_PE2_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_PE2_ID, DEVICE_PE2_ENDPOINT_DEFS)
+DEVICE_PE2               = json_device_emulated_packet_router_disabled(DEVICE_PE2_UUID)
+ENDPOINT_ID_PE2_1_1      = DEVICE_PE2_ENDPOINT_IDS[0]
+ENDPOINT_ID_PE2_2_1      = DEVICE_PE2_ENDPOINT_IDS[1]
+ENDPOINT_ID_PE2_2_2      = DEVICE_PE2_ENDPOINT_IDS[2]
+DEVICE_PE2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_PE2_ENDPOINT_DEFS)
+
+DEVICE_PE3_UUID          = 'PE3'
+DEVICE_PE3_ENDPOINT_DEFS = [('1/1', 'copper', []),
+                            ('2/1', 'copper', []), ('2/2', 'copper', [])]
+DEVICE_PE3_ID            = json_device_id(DEVICE_PE3_UUID)
+DEVICE_PE3_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_PE3_ID, DEVICE_PE3_ENDPOINT_DEFS)
+DEVICE_PE3               = json_device_emulated_packet_router_disabled(DEVICE_PE3_UUID)
+ENDPOINT_ID_PE3_1_1      = DEVICE_PE3_ENDPOINT_IDS[0]
+ENDPOINT_ID_PE3_2_1      = DEVICE_PE3_ENDPOINT_IDS[1]
+ENDPOINT_ID_PE3_2_2      = DEVICE_PE3_ENDPOINT_IDS[2]
+DEVICE_PE3_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_PE3_ENDPOINT_DEFS)
+
+DEVICE_PE4_UUID          = 'PE4'
+DEVICE_PE4_ENDPOINT_DEFS = [('1/1', 'copper', []),
+                            ('2/1', 'copper', []), ('2/2', 'copper', [])]
+DEVICE_PE4_ID            = json_device_id(DEVICE_PE4_UUID)
+DEVICE_PE4_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_PE4_ID, DEVICE_PE4_ENDPOINT_DEFS)
+DEVICE_PE4               = json_device_emulated_packet_router_disabled(DEVICE_PE4_UUID)
+ENDPOINT_ID_PE4_1_1      = DEVICE_PE4_ENDPOINT_IDS[0]
+ENDPOINT_ID_PE4_2_1      = DEVICE_PE4_ENDPOINT_IDS[1]
+ENDPOINT_ID_PE4_2_2      = DEVICE_PE4_ENDPOINT_IDS[2]
+DEVICE_PE4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_PE4_ENDPOINT_DEFS)
+
+# ----- BackBone (BB) Devices ------------------------------------------------------------------------------------------
+DEVICE_BB1_UUID          = 'BB1'
+DEVICE_BB1_ENDPOINT_DEFS = [('1/1', 'copper', []), ('1/2', 'copper', []),
+                            ('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', [])]
+DEVICE_BB1_ID            = json_device_id(DEVICE_BB1_UUID)
+DEVICE_BB1_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_BB1_ID, DEVICE_BB1_ENDPOINT_DEFS)
+DEVICE_BB1               = json_device_emulated_packet_router_disabled(DEVICE_BB1_UUID)
+ENDPOINT_ID_BB1_1_1      = DEVICE_BB1_ENDPOINT_IDS[0]
+ENDPOINT_ID_BB1_1_2      = DEVICE_BB1_ENDPOINT_IDS[1]
+ENDPOINT_ID_BB1_2_1      = DEVICE_BB1_ENDPOINT_IDS[2]
+ENDPOINT_ID_BB1_2_2      = DEVICE_BB1_ENDPOINT_IDS[3]
+ENDPOINT_ID_BB1_2_3      = DEVICE_BB1_ENDPOINT_IDS[4]
+DEVICE_BB1_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB1_ENDPOINT_DEFS)
+
+DEVICE_BB2_UUID          = 'BB2'
+DEVICE_BB2_ENDPOINT_DEFS = [('1/1', 'copper', []), ('1/2', 'copper', []),
+                            ('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', [])]
+DEVICE_BB2_ID            = json_device_id(DEVICE_BB2_UUID)
+DEVICE_BB2_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_BB2_ID, DEVICE_BB2_ENDPOINT_DEFS)
+DEVICE_BB2               = json_device_emulated_packet_router_disabled(DEVICE_BB2_UUID)
+ENDPOINT_ID_BB2_1_1      = DEVICE_BB2_ENDPOINT_IDS[0]
+ENDPOINT_ID_BB2_1_2      = DEVICE_BB2_ENDPOINT_IDS[1]
+ENDPOINT_ID_BB2_2_1      = DEVICE_BB2_ENDPOINT_IDS[2]
+ENDPOINT_ID_BB2_2_2      = DEVICE_BB2_ENDPOINT_IDS[3]
+ENDPOINT_ID_BB2_2_3      = DEVICE_BB2_ENDPOINT_IDS[4]
+DEVICE_BB2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB2_ENDPOINT_DEFS)
+
+DEVICE_BB3_UUID          = 'BB3'
+DEVICE_BB3_ENDPOINT_DEFS = [('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', [])]
+DEVICE_BB3_ID            = json_device_id(DEVICE_BB3_UUID)
+DEVICE_BB3_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_BB3_ID, DEVICE_BB3_ENDPOINT_DEFS)
+DEVICE_BB3               = json_device_emulated_packet_router_disabled(DEVICE_BB3_UUID)
+ENDPOINT_ID_BB3_2_1      = DEVICE_BB3_ENDPOINT_IDS[0]
+ENDPOINT_ID_BB3_2_2      = DEVICE_BB3_ENDPOINT_IDS[1]
+ENDPOINT_ID_BB3_2_3      = DEVICE_BB3_ENDPOINT_IDS[2]
+DEVICE_BB3_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB3_ENDPOINT_DEFS)
+
+DEVICE_BB4_UUID          = 'BB4'
+DEVICE_BB4_ENDPOINT_DEFS = [('1/1', 'copper', []), ('1/2', 'copper', []),
+                            ('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', [])]
+DEVICE_BB4_ID            = json_device_id(DEVICE_BB4_UUID)
+DEVICE_BB4_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_BB4_ID, DEVICE_BB4_ENDPOINT_DEFS)
+DEVICE_BB4               = json_device_emulated_packet_router_disabled(DEVICE_BB4_UUID)
+ENDPOINT_ID_BB4_1_1      = DEVICE_BB4_ENDPOINT_IDS[0]
+ENDPOINT_ID_BB4_1_2      = DEVICE_BB4_ENDPOINT_IDS[1]
+ENDPOINT_ID_BB4_2_1      = DEVICE_BB4_ENDPOINT_IDS[2]
+ENDPOINT_ID_BB4_2_2      = DEVICE_BB4_ENDPOINT_IDS[3]
+ENDPOINT_ID_BB4_2_3      = DEVICE_BB4_ENDPOINT_IDS[4]
+DEVICE_BB4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB4_ENDPOINT_DEFS)
+
+DEVICE_BB5_UUID          = 'BB5'
+DEVICE_BB5_ENDPOINT_DEFS = [('1/1', 'copper', []), ('1/2', 'copper', []),
+                            ('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', [])]
+DEVICE_BB5_ID            = json_device_id(DEVICE_BB5_UUID)
+DEVICE_BB5_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_BB5_ID, DEVICE_BB5_ENDPOINT_DEFS)
+DEVICE_BB5               = json_device_emulated_packet_router_disabled(DEVICE_BB5_UUID)
+ENDPOINT_ID_BB5_1_1      = DEVICE_BB5_ENDPOINT_IDS[0]
+ENDPOINT_ID_BB5_1_2      = DEVICE_BB5_ENDPOINT_IDS[1]
+ENDPOINT_ID_BB5_2_1      = DEVICE_BB5_ENDPOINT_IDS[2]
+ENDPOINT_ID_BB5_2_2      = DEVICE_BB5_ENDPOINT_IDS[3]
+ENDPOINT_ID_BB5_2_3      = DEVICE_BB5_ENDPOINT_IDS[4]
+DEVICE_BB5_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB5_ENDPOINT_DEFS)
+
+DEVICE_BB6_UUID          = 'BB6'
+DEVICE_BB6_ENDPOINT_DEFS = [('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', [])]
+DEVICE_BB6_ID            = json_device_id(DEVICE_BB6_UUID)
+DEVICE_BB6_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_BB6_ID, DEVICE_BB6_ENDPOINT_DEFS)
+DEVICE_BB6               = json_device_emulated_packet_router_disabled(DEVICE_BB6_UUID)
+ENDPOINT_ID_BB6_2_1      = DEVICE_BB6_ENDPOINT_IDS[0]
+ENDPOINT_ID_BB6_2_2      = DEVICE_BB6_ENDPOINT_IDS[1]
+ENDPOINT_ID_BB6_2_3      = DEVICE_BB6_ENDPOINT_IDS[2]
+DEVICE_BB6_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB6_ENDPOINT_DEFS)
+
+DEVICE_BB7_UUID          = 'BB7'
+DEVICE_BB7_ENDPOINT_DEFS = [('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', []), ('2/4', 'copper', []),
+                            ('2/5', 'copper', []), ('2/6', 'copper', [])]
+DEVICE_BB7_ID            = json_device_id(DEVICE_BB7_UUID)
+DEVICE_BB7_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_BB7_ID, DEVICE_BB7_ENDPOINT_DEFS)
+DEVICE_BB7               = json_device_emulated_packet_router_disabled(DEVICE_BB7_UUID)
+ENDPOINT_ID_BB7_2_1      = DEVICE_BB7_ENDPOINT_IDS[0]
+ENDPOINT_ID_BB7_2_2      = DEVICE_BB7_ENDPOINT_IDS[1]
+ENDPOINT_ID_BB7_2_3      = DEVICE_BB7_ENDPOINT_IDS[2]
+ENDPOINT_ID_BB7_2_4      = DEVICE_BB7_ENDPOINT_IDS[3]
+ENDPOINT_ID_BB7_2_5      = DEVICE_BB7_ENDPOINT_IDS[4]
+ENDPOINT_ID_BB7_2_6      = DEVICE_BB7_ENDPOINT_IDS[5]
+DEVICE_BB7_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB7_ENDPOINT_DEFS)
+
+
+# ----- Links ----------------------------------------------------------------------------------------------------------
+LINK_CE1_PE1_UUID, LINK_CE1_PE1_ID, LINK_CE1_PE1 = link(ENDPOINT_ID_CE1_1_1, ENDPOINT_ID_PE1_1_1)
+LINK_CE2_PE2_UUID, LINK_CE2_PE2_ID, LINK_CE2_PE2 = link(ENDPOINT_ID_CE2_1_1, ENDPOINT_ID_PE2_1_1)
+LINK_CE3_PE3_UUID, LINK_CE3_PE3_ID, LINK_CE3_PE3 = link(ENDPOINT_ID_CE3_1_1, ENDPOINT_ID_PE3_1_1)
+LINK_CE4_PE4_UUID, LINK_CE4_PE4_ID, LINK_CE4_PE4 = link(ENDPOINT_ID_CE4_1_1, ENDPOINT_ID_PE4_1_1)
+
+LINK_PE1_BB1_UUID, LINK_PE1_BB1_ID, LINK_PE1_BB1 = link(ENDPOINT_ID_PE1_2_1, ENDPOINT_ID_BB1_1_1)
+LINK_PE1_BB2_UUID, LINK_PE1_BB2_ID, LINK_PE1_BB2 = link(ENDPOINT_ID_PE1_2_2, ENDPOINT_ID_BB2_1_1)
+LINK_PE2_BB1_UUID, LINK_PE2_BB1_ID, LINK_PE2_BB1 = link(ENDPOINT_ID_PE2_2_1, ENDPOINT_ID_BB1_1_2)
+LINK_PE2_BB2_UUID, LINK_PE2_BB2_ID, LINK_PE2_BB2 = link(ENDPOINT_ID_PE2_2_2, ENDPOINT_ID_BB2_1_2)
+
+LINK_PE3_BB4_UUID, LINK_PE3_BB4_ID, LINK_PE3_BB4 = link(ENDPOINT_ID_PE3_2_1, ENDPOINT_ID_BB4_1_1)
+LINK_PE3_BB5_UUID, LINK_PE3_BB5_ID, LINK_PE3_BB5 = link(ENDPOINT_ID_PE3_2_2, ENDPOINT_ID_BB5_1_1)
+LINK_PE4_BB4_UUID, LINK_PE4_BB4_ID, LINK_PE4_BB4 = link(ENDPOINT_ID_PE4_2_1, ENDPOINT_ID_BB4_1_2)
+LINK_PE4_BB5_UUID, LINK_PE4_BB5_ID, LINK_PE4_BB5 = link(ENDPOINT_ID_PE4_2_2, ENDPOINT_ID_BB5_1_2)
+
+LINK_BB1_BB2_UUID, LINK_BB1_BB2_ID, LINK_BB1_BB2 = link(ENDPOINT_ID_BB1_2_1, ENDPOINT_ID_BB2_2_2)
+LINK_BB2_BB3_UUID, LINK_BB2_BB3_ID, LINK_BB2_BB3 = link(ENDPOINT_ID_BB2_2_1, ENDPOINT_ID_BB3_2_2)
+LINK_BB3_BB4_UUID, LINK_BB3_BB4_ID, LINK_BB3_BB4 = link(ENDPOINT_ID_BB3_2_1, ENDPOINT_ID_BB4_2_2)
+LINK_BB4_BB5_UUID, LINK_BB4_BB5_ID, LINK_BB4_BB5 = link(ENDPOINT_ID_BB4_2_1, ENDPOINT_ID_BB5_2_2)
+LINK_BB5_BB6_UUID, LINK_BB5_BB6_ID, LINK_BB5_BB6 = link(ENDPOINT_ID_BB5_2_1, ENDPOINT_ID_BB6_2_2)
+LINK_BB6_BB1_UUID, LINK_BB6_BB1_ID, LINK_BB6_BB1 = link(ENDPOINT_ID_BB6_2_1, ENDPOINT_ID_BB1_2_2)
+
+LINK_BB1_BB7_UUID, LINK_BB1_BB7_ID, LINK_BB1_BB7 = link(ENDPOINT_ID_BB1_2_3, ENDPOINT_ID_BB7_2_1)
+LINK_BB2_BB7_UUID, LINK_BB2_BB7_ID, LINK_BB2_BB7 = link(ENDPOINT_ID_BB2_2_3, ENDPOINT_ID_BB7_2_2)
+LINK_BB3_BB7_UUID, LINK_BB3_BB7_ID, LINK_BB3_BB7 = link(ENDPOINT_ID_BB3_2_3, ENDPOINT_ID_BB7_2_3)
+LINK_BB4_BB7_UUID, LINK_BB4_BB7_ID, LINK_BB4_BB7 = link(ENDPOINT_ID_BB4_2_3, ENDPOINT_ID_BB7_2_4)
+LINK_BB5_BB7_UUID, LINK_BB5_BB7_ID, LINK_BB5_BB7 = link(ENDPOINT_ID_BB5_2_3, ENDPOINT_ID_BB7_2_5)
+LINK_BB6_BB7_UUID, LINK_BB6_BB7_ID, LINK_BB6_BB7 = link(ENDPOINT_ID_BB6_2_3, ENDPOINT_ID_BB7_2_6)
+
+
+# ----- WIM Service Settings -------------------------------------------------------------------------------------------
+WIM_USERNAME = 'admin'
+WIM_PASSWORD = 'admin'
+
+def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]):
+    ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid']
+    ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid']
+    pe_device_uuid = pe_device_id['device_uuid']['uuid']
+    service_endpoint_id = '{:s}-{:s}-{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid)
+    bearer = '{:s}-{:s}'.format(ce_device_uuid, pe_device_uuid)
+    _mapping = {
+        'service_endpoint_id': service_endpoint_id,
+        'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid,
+        'service_mapping_info': {
+            'site-id': site_id,
+            'bearer': {'bearer-reference': bearer},
+        }
+    }
+    if priority is not None: _mapping['service_mapping_info']['priority'] = priority
+    if len(redundant) > 0: _mapping['service_mapping_info']['redundant'] = redundant
+    return service_endpoint_id, _mapping
+
+WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', ENDPOINT_ID_CE1_1_1, DEVICE_PE1_ID, priority=10, redundant=['DC1-CE2-1/1'])
+WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', ENDPOINT_ID_CE2_1_1, DEVICE_PE2_ID, priority=20)
+WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', ENDPOINT_ID_CE3_1_1, DEVICE_PE3_ID, priority=10, redundant=['DC2-CE4-1/1'])
+WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', ENDPOINT_ID_CE4_1_1, DEVICE_PE4_ID, priority=20)
+
+WIM_MAPPING  = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC]
+
+WIM_SRV_VLAN_ID = 300
+WIM_SERVICE_TYPE = 'ELAN'
+WIM_SERVICE_CONNECTION_POINTS = [
+    {'service_endpoint_id': WIM_SEP_DC1_PRI,
+        'service_endpoint_encapsulation_type': 'dot1q',
+        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
+    {'service_endpoint_id': WIM_SEP_DC2_PRI,
+        'service_endpoint_encapsulation_type': 'dot1q',
+        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
+]
+
+
+# ----- Object Collections ---------------------------------------------------------------------------------------------
+
+CONTEXTS = [CONTEXT]
+TOPOLOGIES = [TOPOLOGY]
+
+DEVICES = [
+    (DEVICE_CE1, DEVICE_CE1_CONNECT_RULES),
+    (DEVICE_CE2, DEVICE_CE2_CONNECT_RULES),
+    (DEVICE_CE3, DEVICE_CE3_CONNECT_RULES),
+    (DEVICE_CE4, DEVICE_CE4_CONNECT_RULES),
+
+    (DEVICE_PE1, DEVICE_PE1_CONNECT_RULES),
+    (DEVICE_PE2, DEVICE_PE2_CONNECT_RULES),
+    (DEVICE_PE3, DEVICE_PE3_CONNECT_RULES),
+    (DEVICE_PE4, DEVICE_PE4_CONNECT_RULES),
+
+    (DEVICE_BB1, DEVICE_BB1_CONNECT_RULES),
+    (DEVICE_BB2, DEVICE_BB2_CONNECT_RULES),
+    (DEVICE_BB6, DEVICE_BB6_CONNECT_RULES),
+    (DEVICE_BB7, DEVICE_BB7_CONNECT_RULES),
+    (DEVICE_BB3, DEVICE_BB3_CONNECT_RULES),
+    (DEVICE_BB5, DEVICE_BB5_CONNECT_RULES),
+    (DEVICE_BB4, DEVICE_BB4_CONNECT_RULES),
+]
+
+LINKS = [
+    LINK_CE1_PE1, LINK_CE2_PE2, LINK_CE3_PE3, LINK_CE4_PE4,
+    LINK_PE1_BB1, LINK_PE1_BB2, LINK_PE2_BB1, LINK_PE2_BB2,
+    LINK_PE3_BB5, LINK_PE3_BB4, LINK_PE4_BB5, LINK_PE4_BB4,
+    LINK_BB1_BB2, LINK_BB2_BB3, LINK_BB3_BB4, LINK_BB4_BB5, LINK_BB5_BB6, LINK_BB6_BB1,
+    LINK_BB1_BB7, LINK_BB2_BB7, LINK_BB3_BB7, LINK_BB4_BB7, LINK_BB5_BB7, LINK_BB6_BB7,
+]
diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py
new file mode 100644
index 0000000000000000000000000000000000000000..94d205a64681c7b1978524c1938cbc6b944afb58
--- /dev/null
+++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py
@@ -0,0 +1,210 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, uuid
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.tools.object_factory.Context import json_context, json_context_id
+from common.tools.object_factory.Device import (
+    json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
+    json_device_emulated_packet_router_disabled, json_device_emulated_tapi_disabled, json_device_id)
+from common.tools.object_factory.EndPoint import json_endpoints
+from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
+from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned
+from common.tools.object_factory.Topology import json_topology, json_topology_id
+
+# if true, Device component is present and will infeer the endpoints from connect-rules
+# if false, Device component is not present and device objects must contain preconfigured endpoints
+ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'True')
+ADD_CONNECT_RULES_TO_DEVICES = ADD_CONNECT_RULES_TO_DEVICES.upper() in {'T', 'TRUE', '1', 'Y', 'YES'}
+
+def compose_router(device_uuid, endpoint_uuids, topology_id=None):
+    device_id = json_device_id(device_uuid)
+    r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
+    config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
+    endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
+    j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
+    device = json_device_emulated_packet_router_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
+    return device_id, endpoints, device
+
+def compose_ols(device_uuid, endpoint_uuids, topology_id=None):
+    device_id = json_device_id(device_uuid)
+    r_endpoints = [(endpoint_uuid, 'optical', []) for endpoint_uuid in endpoint_uuids]
+    config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
+    endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
+    j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
+    device = json_device_emulated_tapi_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
+    return device_id, endpoints, device
+
+def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None):
+    device_id = json_device_id(device_uuid)
+    r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
+    config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
+    endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
+    j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
+    device = json_device_emulated_datacenter_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
+    return device_id, endpoints, device
+
+def compose_link(endpoint_a, endpoint_z):
+    link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
+    link_id   = json_link_id(link_uuid)
+    link      = json_link(link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']])
+    return link_id, link
+
+def compose_service(endpoint_a, endpoint_z, constraints=[]):
+    service_uuid = get_service_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
+    endpoint_ids = [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']]
+    service = json_service_l3nm_planned(service_uuid, endpoint_ids=endpoint_ids, constraints=constraints)
+    return service
+
+# ----- Context --------------------------------------------------------------------------------------------------------
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
+CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+
+# ----- Domains --------------------------------------------------------------------------------------------------------
+# Overall network topology
+TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
+TOPO_ADMIN_ID   = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
+TOPO_ADMIN      = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
+
+# DataCenter #1 Network
+TOPO_DC1_UUID = 'DC1'
+TOPO_DC1_ID   = json_topology_id(TOPO_DC1_UUID, context_id=CONTEXT_ID)
+TOPO_DC1      = json_topology(TOPO_DC1_UUID, context_id=CONTEXT_ID)
+
+# DataCenter #2 Network
+TOPO_DC2_UUID = 'DC2'
+TOPO_DC2_ID   = json_topology_id(TOPO_DC2_UUID, context_id=CONTEXT_ID)
+TOPO_DC2      = json_topology(TOPO_DC2_UUID, context_id=CONTEXT_ID)
+
+# CellSite #1 Network
+TOPO_CS1_UUID = 'CS1'
+TOPO_CS1_ID   = json_topology_id(TOPO_CS1_UUID, context_id=CONTEXT_ID)
+TOPO_CS1      = json_topology(TOPO_CS1_UUID, context_id=CONTEXT_ID)
+
+# CellSite #2 Network
+TOPO_CS2_UUID = 'CS2'
+TOPO_CS2_ID   = json_topology_id(TOPO_CS2_UUID, context_id=CONTEXT_ID)
+TOPO_CS2      = json_topology(TOPO_CS2_UUID, context_id=CONTEXT_ID)
+
+# Transport Network Network
+TOPO_TN_UUID = 'TN'
+TOPO_TN_ID   = json_topology_id(TOPO_TN_UUID, context_id=CONTEXT_ID)
+TOPO_TN      = json_topology(TOPO_TN_UUID, context_id=CONTEXT_ID)
+
+
+# ----- Devices --------------------------------------------------------------------------------------------------------
+# DataCenters
+DEV_DC1GW_ID, DEV_DC1GW_EPS, DEV_DC1GW = compose_datacenter('DC1-GW', ['eth1', 'eth2', 'int'])
+DEV_DC2GW_ID, DEV_DC2GW_EPS, DEV_DC2GW = compose_datacenter('DC2-GW', ['eth1', 'eth2', 'int'])
+
+# CellSites
+DEV_CS1GW1_ID, DEV_CS1GW1_EPS, DEV_CS1GW1 = compose_router('CS1-GW1', ['10/1', '1/1'])
+DEV_CS1GW2_ID, DEV_CS1GW2_EPS, DEV_CS1GW2 = compose_router('CS1-GW2', ['10/1', '1/1'])
+DEV_CS2GW1_ID, DEV_CS2GW1_EPS, DEV_CS2GW1 = compose_router('CS2-GW1', ['10/1', '1/1'])
+DEV_CS2GW2_ID, DEV_CS2GW2_EPS, DEV_CS2GW2 = compose_router('CS2-GW2', ['10/1', '1/1'])
+
+# Transport Network
+#tols_ep_uuids = [str(uuid.uuid4()).split('-')[-1] for _ in range(4)]
+tols_ep_uuids = ['afd8ffbb5403', '04b84e213e83', '3169ae676ac6', '93506f786270']
+DEV_TOLS_ID, DEV_TOLS_EPS, DEV_TOLS = compose_ols('TN-OLS', tols_ep_uuids)
+
+
+# ----- Links ----------------------------------------------------------------------------------------------------------
+# InterDomain DC-CSGW
+LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW1 = compose_link(DEV_DC1GW_EPS[0], DEV_CS1GW1_EPS[0])
+LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1GW2_EPS[0])
+LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
+LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
+
+# InterDomain CSGW-TN
+LINK_CS1GW1_TOLS_ID, LINK_CS1GW1_TOLS = compose_link(DEV_CS1GW1_EPS[1], DEV_TOLS_EPS[0])
+LINK_CS1GW2_TOLS_ID, LINK_CS1GW2_TOLS = compose_link(DEV_CS1GW2_EPS[1], DEV_TOLS_EPS[1])
+LINK_CS2GW1_TOLS_ID, LINK_CS2GW1_TOLS = compose_link(DEV_CS2GW1_EPS[1], DEV_TOLS_EPS[2])
+LINK_CS2GW2_TOLS_ID, LINK_CS2GW2_TOLS = compose_link(DEV_CS2GW2_EPS[1], DEV_TOLS_EPS[3])
+
+
+# ----- WIM Service Settings -------------------------------------------------------------------------------------------
+WIM_USERNAME = 'admin'
+WIM_PASSWORD = 'admin'
+
+def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]):
+    ce_endpoint_id = ce_endpoint_id['endpoint_id']
+    ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid']
+    ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid']
+    pe_device_uuid = pe_device_id['device_uuid']['uuid']
+    service_endpoint_id = '{:s}:{:s}:{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid)
+    bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid)
+    _mapping = {
+        'service_endpoint_id': service_endpoint_id,
+        'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid,
+        'service_mapping_info': {
+            'site-id': site_id,
+            'bearer': {'bearer-reference': bearer},
+        }
+    }
+    if priority is not None: _mapping['service_mapping_info']['priority'] = priority
+    if len(redundant) > 0: _mapping['service_mapping_info']['redundant'] = redundant
+    return service_endpoint_id, _mapping
+
+WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', DEV_DC1GW_EPS[0], DEV_CS1GW1_ID, priority=10, redundant=['DC1:DC1-GW:eth2'])
+WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', DEV_DC1GW_EPS[1], DEV_CS1GW2_ID, priority=20, redundant=['DC1:DC1-GW:eth1'])
+WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', DEV_DC2GW_EPS[0], DEV_CS2GW1_ID, priority=10, redundant=['DC2:DC2-GW:eth2'])
+WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', DEV_DC2GW_EPS[1], DEV_CS2GW2_ID, priority=20, redundant=['DC2:DC2-GW:eth1'])
+
+WIM_MAPPING  = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC]
+
+WIM_SRV_VLAN_ID = 300
+WIM_SERVICE_TYPE = 'ELAN'
+WIM_SERVICE_CONNECTION_POINTS = [
+    {'service_endpoint_id': WIM_SEP_DC1_PRI,
+        'service_endpoint_encapsulation_type': 'dot1q',
+        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
+    {'service_endpoint_id': WIM_SEP_DC2_PRI,
+        'service_endpoint_encapsulation_type': 'dot1q',
+        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
+]
+
+
+# ----- Containers -----------------------------------------------------------------------------------------------------
+CONTEXTS   = [  CONTEXT ]
+TOPOLOGIES = [  TOPO_ADMIN, TOPO_DC1, TOPO_DC2, TOPO_CS1, TOPO_CS2, TOPO_TN ]
+DEVICES    = [  DEV_DC1GW, DEV_DC2GW,
+                DEV_CS1GW1, DEV_CS1GW2, DEV_CS2GW1, DEV_CS2GW2,
+                DEV_TOLS,
+            ]
+LINKS      = [  LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
+                LINK_CS1GW1_TOLS, LINK_CS1GW2_TOLS, LINK_CS2GW1_TOLS, LINK_CS2GW2_TOLS,
+            ]
+
+OBJECTS_PER_TOPOLOGY = [
+    (TOPO_ADMIN_ID,
+        [DEV_DC1GW_ID, DEV_DC2GW_ID, DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID, DEV_TOLS_ID],
+        [LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID],
+    ),
+    (TOPO_DC1_ID,
+        [DEV_DC1GW_ID],
+        []),
+    (TOPO_DC2_ID,
+        [DEV_DC2GW_ID],
+        []),
+    (TOPO_CS1_ID,
+        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
+        []),
+    (TOPO_CS2_ID,
+        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
+        []),
+    (TOPO_TN_ID,
+        [DEV_TOLS_ID],
+        []),
+]
diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py
new file mode 100644
index 0000000000000000000000000000000000000000..229e3d5fe3cee54fb7295ac0049507ec4e348a04
--- /dev/null
+++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py
@@ -0,0 +1,227 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.tools.object_factory.Context import json_context, json_context_id
+from common.tools.object_factory.Device import (
+    json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
+    json_device_emulated_packet_router_disabled, json_device_id)
+from common.tools.object_factory.EndPoint import json_endpoints
+from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
+from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned
+from common.tools.object_factory.Topology import json_topology, json_topology_id
+
+# if true, Device component is present and will infeer the endpoints from connect-rules
+# if false, Device component is not present and device objects must contain preconfigured endpoints
+ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'True')
+ADD_CONNECT_RULES_TO_DEVICES = ADD_CONNECT_RULES_TO_DEVICES.upper() in {'T', 'TRUE', '1', 'Y', 'YES'}
+
+def compose_router(device_uuid, endpoint_uuids, topology_id=None):
+    device_id = json_device_id(device_uuid)
+    r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
+    config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
+    endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
+    j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
+    device = json_device_emulated_packet_router_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
+    return device_id, endpoints, device
+
+def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None):
+    device_id = json_device_id(device_uuid)
+    r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
+    config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
+    endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
+    j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
+    device = json_device_emulated_datacenter_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
+    return device_id, endpoints, device
+
+def compose_link(endpoint_a, endpoint_z):
+    link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
+    link_id   = json_link_id(link_uuid)
+    link      = json_link(link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']])
+    return link_id, link
+
+def compose_service(endpoint_a, endpoint_z, constraints=[]):
+    service_uuid = get_service_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
+    endpoint_ids = [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']]
+    service = json_service_l3nm_planned(service_uuid, endpoint_ids=endpoint_ids, constraints=constraints)
+    return service
+
+# ----- Context --------------------------------------------------------------------------------------------------------
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
+CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+
+# ----- Domains --------------------------------------------------------------------------------------------------------
+# Overall network topology
+TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
+TOPO_ADMIN_ID   = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
+TOPO_ADMIN      = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
+
+# DataCenter #1 Network
+TOPO_DC1_UUID = 'DC1'
+TOPO_DC1_ID   = json_topology_id(TOPO_DC1_UUID, context_id=CONTEXT_ID)
+TOPO_DC1      = json_topology(TOPO_DC1_UUID, context_id=CONTEXT_ID)
+
+# DataCenter #2 Network
+TOPO_DC2_UUID = 'DC2'
+TOPO_DC2_ID   = json_topology_id(TOPO_DC2_UUID, context_id=CONTEXT_ID)
+TOPO_DC2      = json_topology(TOPO_DC2_UUID, context_id=CONTEXT_ID)
+
+# CellSite #1 Network
+TOPO_CS1_UUID = 'CS1'
+TOPO_CS1_ID   = json_topology_id(TOPO_CS1_UUID, context_id=CONTEXT_ID)
+TOPO_CS1      = json_topology(TOPO_CS1_UUID, context_id=CONTEXT_ID)
+
+# CellSite #2 Network
+TOPO_CS2_UUID = 'CS2'
+TOPO_CS2_ID   = json_topology_id(TOPO_CS2_UUID, context_id=CONTEXT_ID)
+TOPO_CS2      = json_topology(TOPO_CS2_UUID, context_id=CONTEXT_ID)
+
+# Transport Network Network
+TOPO_TN_UUID = 'TN'
+TOPO_TN_ID   = json_topology_id(TOPO_TN_UUID, context_id=CONTEXT_ID)
+TOPO_TN      = json_topology(TOPO_TN_UUID, context_id=CONTEXT_ID)
+
+
+# ----- Devices --------------------------------------------------------------------------------------------------------
+# DataCenters
+DEV_DC1GW_ID, DEV_DC1GW_EPS, DEV_DC1GW = compose_datacenter('DC1-GW', ['eth1', 'eth2', 'int'])
+DEV_DC2GW_ID, DEV_DC2GW_EPS, DEV_DC2GW = compose_datacenter('DC2-GW', ['eth1', 'eth2', 'int'])
+
+# CellSites
+DEV_CS1GW1_ID, DEV_CS1GW1_EPS, DEV_CS1GW1 = compose_router('CS1-GW1', ['10/1', '1/1', '1/2'])
+DEV_CS1GW2_ID, DEV_CS1GW2_EPS, DEV_CS1GW2 = compose_router('CS1-GW2', ['10/1', '1/1', '1/2'])
+DEV_CS2GW1_ID, DEV_CS2GW1_EPS, DEV_CS2GW1 = compose_router('CS2-GW1', ['10/1', '1/1', '1/2'])
+DEV_CS2GW2_ID, DEV_CS2GW2_EPS, DEV_CS2GW2 = compose_router('CS2-GW2', ['10/1', '1/1', '1/2'])
+
+# Transport Network
+DEV_TNR1_ID, DEV_TNR1_EPS, DEV_TNR1 = compose_router('TN-R1', ['1/1', '1/2', '2/1', '2/2', '2/3'])
+DEV_TNR2_ID, DEV_TNR2_EPS, DEV_TNR2 = compose_router('TN-R2', ['1/1', '1/2', '2/1', '2/2', '2/3'])
+DEV_TNR3_ID, DEV_TNR3_EPS, DEV_TNR3 = compose_router('TN-R3', ['1/1', '1/2', '2/1', '2/2', '2/3'])
+DEV_TNR4_ID, DEV_TNR4_EPS, DEV_TNR4 = compose_router('TN-R4', ['1/1', '1/2', '2/1', '2/2', '2/3'])
+
+
+# ----- Links ----------------------------------------------------------------------------------------------------------
+# InterDomain DC-CSGW
+LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW1 = compose_link(DEV_DC1GW_EPS[0], DEV_CS1GW1_EPS[0])
+LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1GW2_EPS[0])
+LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
+LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
+
+# InterDomain CSGW-TN
+LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0])
+LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0])
+LINK_CS1GW1_TNR2_ID, LINK_CS1GW1_TNR2 = compose_link(DEV_CS1GW1_EPS[2], DEV_TNR2_EPS[1])
+LINK_CS1GW2_TNR1_ID, LINK_CS1GW2_TNR1 = compose_link(DEV_CS1GW2_EPS[2], DEV_TNR1_EPS[1])
+LINK_CS2GW1_TNR3_ID, LINK_CS2GW1_TNR3 = compose_link(DEV_CS2GW1_EPS[1], DEV_TNR3_EPS[0])
+LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4_EPS[0])
+LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1])
+LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1])
+
+# IntraDomain TN
+LINK_TNR1_TNR2_ID, LINK_TNR1_TNR2 = compose_link(DEV_TNR1_EPS[2], DEV_TNR2_EPS[3])
+LINK_TNR2_TNR3_ID, LINK_TNR2_TNR3 = compose_link(DEV_TNR2_EPS[2], DEV_TNR3_EPS[3])
+LINK_TNR3_TNR4_ID, LINK_TNR3_TNR4 = compose_link(DEV_TNR3_EPS[2], DEV_TNR4_EPS[3])
+LINK_TNR4_TNR1_ID, LINK_TNR4_TNR1 = compose_link(DEV_TNR4_EPS[2], DEV_TNR1_EPS[3])
+LINK_TNR1_TNR3_ID, LINK_TNR1_TNR3 = compose_link(DEV_TNR1_EPS[4], DEV_TNR3_EPS[4])
+LINK_TNR2_TNR4_ID, LINK_TNR2_TNR4 = compose_link(DEV_TNR2_EPS[4], DEV_TNR4_EPS[4])
+
+
+# ----- WIM Service Settings -------------------------------------------------------------------------------------------
+WIM_USERNAME = 'admin'
+WIM_PASSWORD = 'admin'
+
+def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]):
+    ce_endpoint_id = ce_endpoint_id['endpoint_id']
+    ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid']
+    ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid']
+    pe_device_uuid = pe_device_id['device_uuid']['uuid']
+    service_endpoint_id = '{:s}:{:s}:{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid)
+    bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid)
+    _mapping = {
+        'service_endpoint_id': service_endpoint_id,
+        'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid,
+        'service_mapping_info': {
+            'site-id': site_id,
+            'bearer': {'bearer-reference': bearer},
+        }
+    }
+    if priority is not None: _mapping['service_mapping_info']['priority'] = priority
+    if len(redundant) > 0: _mapping['service_mapping_info']['redundant'] = redundant
+    return service_endpoint_id, _mapping
+
+WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', DEV_DC1GW_EPS[0], DEV_CS1GW1_ID, priority=10, redundant=['DC1:DC1-GW:eth2'])
+WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', DEV_DC1GW_EPS[1], DEV_CS1GW2_ID, priority=20, redundant=['DC1:DC1-GW:eth1'])
+WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', DEV_DC2GW_EPS[0], DEV_CS2GW1_ID, priority=10, redundant=['DC2:DC2-GW:eth2'])
+WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', DEV_DC2GW_EPS[1], DEV_CS2GW2_ID, priority=20, redundant=['DC2:DC2-GW:eth1'])
+
+WIM_MAPPING  = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC]
+
+WIM_SRV_VLAN_ID = 300
+WIM_SERVICE_TYPE = 'ELAN'
+WIM_SERVICE_CONNECTION_POINTS = [
+    {'service_endpoint_id': WIM_SEP_DC1_PRI,
+        'service_endpoint_encapsulation_type': 'dot1q',
+        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
+    {'service_endpoint_id': WIM_SEP_DC2_PRI,
+        'service_endpoint_encapsulation_type': 'dot1q',
+        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
+]
+
+
+# ----- Containers -----------------------------------------------------------------------------------------------------
+CONTEXTS   = [  CONTEXT ]
+TOPOLOGIES = [  TOPO_ADMIN, TOPO_DC1, TOPO_DC2, TOPO_CS1, TOPO_CS2, TOPO_TN ]
+DEVICES    = [  DEV_DC1GW, DEV_DC2GW,
+                DEV_CS1GW1, DEV_CS1GW2, DEV_CS2GW1, DEV_CS2GW2,
+                DEV_TNR1, DEV_TNR2, DEV_TNR3, DEV_TNR4,
+            ]
+LINKS      = [  LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
+                LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1,
+                LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3,
+                LINK_TNR1_TNR2, LINK_TNR2_TNR3, LINK_TNR3_TNR4, LINK_TNR4_TNR1, LINK_TNR1_TNR3, LINK_TNR2_TNR4,
+            ]
+
+OBJECTS_PER_TOPOLOGY = [
+    (TOPO_ADMIN_ID,
+        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
+            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
+            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+        ],
+        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
+            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
+            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
+            LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
+            LINK_TNR2_TNR4_ID,
+        ],
+    ),
+    (TOPO_DC1_ID,
+        [DEV_DC1GW_ID],
+        []),
+    (TOPO_DC2_ID,
+        [DEV_DC2GW_ID],
+        []),
+    (TOPO_CS1_ID,
+        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
+        []),
+    (TOPO_CS2_ID,
+        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
+        []),
+    (TOPO_TN_ID,
+        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+        ],
+        [   LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
+            LINK_TNR2_TNR4_ID,
+        ]),
+]
diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py
new file mode 100644
index 0000000000000000000000000000000000000000..7063265f47344555d5b99c9c9747029227a494e0
--- /dev/null
+++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py
@@ -0,0 +1,239 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, uuid
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.tools.object_factory.Context import json_context, json_context_id
+from common.tools.object_factory.Device import (
+    json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
+    json_device_emulated_packet_router_disabled, json_device_emulated_tapi_disabled, json_device_id)
+from common.tools.object_factory.EndPoint import json_endpoints
+from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
+from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned
+from common.tools.object_factory.Topology import json_topology, json_topology_id
+
+# if true, Device component is present and will infeer the endpoints from connect-rules
+# if false, Device component is not present and device objects must contain preconfigured endpoints
+ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'True')
+ADD_CONNECT_RULES_TO_DEVICES = ADD_CONNECT_RULES_TO_DEVICES.upper() in {'T', 'TRUE', '1', 'Y', 'YES'}
+
+def compose_router(device_uuid, endpoint_uuids, topology_id=None):
+    device_id = json_device_id(device_uuid)
+    r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
+    config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
+    endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
+    j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
+    device = json_device_emulated_packet_router_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
+    return device_id, endpoints, device
+
+def compose_ols(device_uuid, endpoint_uuids, topology_id=None):
+    device_id = json_device_id(device_uuid)
+    r_endpoints = [(endpoint_uuid, 'optical', []) for endpoint_uuid in endpoint_uuids]
+    config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
+    endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
+    j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
+    device = json_device_emulated_tapi_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
+    return device_id, endpoints, device
+
+def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None):
+    device_id = json_device_id(device_uuid)
+    r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
+    config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
+    endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
+    j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
+    device = json_device_emulated_datacenter_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
+    return device_id, endpoints, device
+
+def compose_link(endpoint_a, endpoint_z):
+    link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
+    link_id   = json_link_id(link_uuid)
+    link      = json_link(link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']])
+    return link_id, link
+
+def compose_service(endpoint_a, endpoint_z, constraints=[]):
+    service_uuid = get_service_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
+    endpoint_ids = [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']]
+    service = json_service_l3nm_planned(service_uuid, endpoint_ids=endpoint_ids, constraints=constraints)
+    return service
+
+# ----- Context --------------------------------------------------------------------------------------------------------
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
+CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+
+# ----- Domains --------------------------------------------------------------------------------------------------------
+# Overall network topology
+TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
+TOPO_ADMIN_ID   = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
+TOPO_ADMIN      = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
+
+# DataCenter #1 Network
+TOPO_DC1_UUID = 'DC1'
+TOPO_DC1_ID   = json_topology_id(TOPO_DC1_UUID, context_id=CONTEXT_ID)
+TOPO_DC1      = json_topology(TOPO_DC1_UUID, context_id=CONTEXT_ID)
+
+# DataCenter #2 Network
+TOPO_DC2_UUID = 'DC2'
+TOPO_DC2_ID   = json_topology_id(TOPO_DC2_UUID, context_id=CONTEXT_ID)
+TOPO_DC2      = json_topology(TOPO_DC2_UUID, context_id=CONTEXT_ID)
+
+# CellSite #1 Network
+TOPO_CS1_UUID = 'CS1'
+TOPO_CS1_ID   = json_topology_id(TOPO_CS1_UUID, context_id=CONTEXT_ID)
+TOPO_CS1      = json_topology(TOPO_CS1_UUID, context_id=CONTEXT_ID)
+
+# CellSite #2 Network
+TOPO_CS2_UUID = 'CS2'
+TOPO_CS2_ID   = json_topology_id(TOPO_CS2_UUID, context_id=CONTEXT_ID)
+TOPO_CS2      = json_topology(TOPO_CS2_UUID, context_id=CONTEXT_ID)
+
+# Transport Network Network
+TOPO_TN_UUID = 'TN'
+TOPO_TN_ID   = json_topology_id(TOPO_TN_UUID, context_id=CONTEXT_ID)
+TOPO_TN      = json_topology(TOPO_TN_UUID, context_id=CONTEXT_ID)
+
+
+# ----- Devices --------------------------------------------------------------------------------------------------------
+# DataCenters
+DEV_DC1GW_ID, DEV_DC1GW_EPS, DEV_DC1GW = compose_datacenter('DC1-GW', ['eth1', 'eth2', 'int'])
+DEV_DC2GW_ID, DEV_DC2GW_EPS, DEV_DC2GW = compose_datacenter('DC2-GW', ['eth1', 'eth2', 'int'])
+
+# CellSites
+DEV_CS1GW1_ID, DEV_CS1GW1_EPS, DEV_CS1GW1 = compose_router('CS1-GW1', ['10/1', '1/1', '1/2'])
+DEV_CS1GW2_ID, DEV_CS1GW2_EPS, DEV_CS1GW2 = compose_router('CS1-GW2', ['10/1', '1/1', '1/2'])
+DEV_CS2GW1_ID, DEV_CS2GW1_EPS, DEV_CS2GW1 = compose_router('CS2-GW1', ['10/1', '1/1', '1/2'])
+DEV_CS2GW2_ID, DEV_CS2GW2_EPS, DEV_CS2GW2 = compose_router('CS2-GW2', ['10/1', '1/1', '1/2'])
+
+# Transport Network
+DEV_TNR1_ID, DEV_TNR1_EPS, DEV_TNR1 = compose_router('TN-R1', ['1/1', '1/2', '2/1'])
+DEV_TNR2_ID, DEV_TNR2_EPS, DEV_TNR2 = compose_router('TN-R2', ['1/1', '1/2', '2/1'])
+DEV_TNR3_ID, DEV_TNR3_EPS, DEV_TNR3 = compose_router('TN-R3', ['1/1', '1/2', '2/1'])
+DEV_TNR4_ID, DEV_TNR4_EPS, DEV_TNR4 = compose_router('TN-R4', ['1/1', '1/2', '2/1'])
+
+#tols_ep_uuids = [str(uuid.uuid4()).split('-')[-1] for _ in range(4)]
+tols_ep_uuids = ['afd8ffbb5403', '04b84e213e83', '3169ae676ac6', '93506f786270']
+DEV_TOLS_ID, DEV_TOLS_EPS, DEV_TOLS = compose_ols('TN-OLS', tols_ep_uuids)
+
+
+# ----- Links ----------------------------------------------------------------------------------------------------------
+# InterDomain DC-CSGW
+LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW1 = compose_link(DEV_DC1GW_EPS[0], DEV_CS1GW1_EPS[0])
+LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1GW2_EPS[0])
+LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
+LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
+
+# InterDomain CSGW-TN
+LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0])
+LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0])
+LINK_CS1GW1_TNR2_ID, LINK_CS1GW1_TNR2 = compose_link(DEV_CS1GW1_EPS[2], DEV_TNR2_EPS[1])
+LINK_CS1GW2_TNR1_ID, LINK_CS1GW2_TNR1 = compose_link(DEV_CS1GW2_EPS[2], DEV_TNR1_EPS[1])
+LINK_CS2GW1_TNR3_ID, LINK_CS2GW1_TNR3 = compose_link(DEV_CS2GW1_EPS[1], DEV_TNR3_EPS[0])
+LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4_EPS[0])
+LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1])
+LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1])
+
+# IntraDomain TN
+LINK_TNR1_TOLS_ID, LINK_TNR1_TOLS = compose_link(DEV_TNR1_EPS[2], DEV_TOLS_EPS[0])
+LINK_TNR2_TOLS_ID, LINK_TNR2_TOLS = compose_link(DEV_TNR2_EPS[2], DEV_TOLS_EPS[1])
+LINK_TNR3_TOLS_ID, LINK_TNR3_TOLS = compose_link(DEV_TNR3_EPS[2], DEV_TOLS_EPS[2])
+LINK_TNR4_TOLS_ID, LINK_TNR4_TOLS = compose_link(DEV_TNR4_EPS[2], DEV_TOLS_EPS[3])
+
+
+# ----- WIM Service Settings -------------------------------------------------------------------------------------------
+WIM_USERNAME = 'admin'
+WIM_PASSWORD = 'admin'
+
+def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]):
+    ce_endpoint_id = ce_endpoint_id['endpoint_id']
+    ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid']
+    ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid']
+    pe_device_uuid = pe_device_id['device_uuid']['uuid']
+    service_endpoint_id = '{:s}:{:s}:{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid)
+    bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid)
+    _mapping = {
+        'service_endpoint_id': service_endpoint_id,
+        'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid,
+        'service_mapping_info': {
+            'site-id': site_id,
+            'bearer': {'bearer-reference': bearer},
+        }
+    }
+    if priority is not None: _mapping['service_mapping_info']['priority'] = priority
+    if len(redundant) > 0: _mapping['service_mapping_info']['redundant'] = redundant
+    return service_endpoint_id, _mapping
+
+WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', DEV_DC1GW_EPS[0], DEV_CS1GW1_ID, priority=10, redundant=['DC1:DC1-GW:eth2'])
+WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', DEV_DC1GW_EPS[1], DEV_CS1GW2_ID, priority=20, redundant=['DC1:DC1-GW:eth1'])
+WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', DEV_DC2GW_EPS[0], DEV_CS2GW1_ID, priority=10, redundant=['DC2:DC2-GW:eth2'])
+WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', DEV_DC2GW_EPS[1], DEV_CS2GW2_ID, priority=20, redundant=['DC2:DC2-GW:eth1'])
+
+WIM_MAPPING  = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC]
+
+WIM_SRV_VLAN_ID = 300
+WIM_SERVICE_TYPE = 'ELAN'
+WIM_SERVICE_CONNECTION_POINTS = [
+    {'service_endpoint_id': WIM_SEP_DC1_PRI,
+        'service_endpoint_encapsulation_type': 'dot1q',
+        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
+    {'service_endpoint_id': WIM_SEP_DC2_PRI,
+        'service_endpoint_encapsulation_type': 'dot1q',
+        'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
+]
+
+
+# ----- Containers -----------------------------------------------------------------------------------------------------
+CONTEXTS   = [  CONTEXT ]
+TOPOLOGIES = [  TOPO_ADMIN, TOPO_DC1, TOPO_DC2, TOPO_CS1, TOPO_CS2, TOPO_TN ]
+DEVICES    = [  DEV_DC1GW, DEV_DC2GW,
+                DEV_CS1GW1, DEV_CS1GW2, DEV_CS2GW1, DEV_CS2GW2,
+                DEV_TNR1, DEV_TNR2, DEV_TNR3, DEV_TNR4,
+                DEV_TOLS,
+            ]
+LINKS      = [  LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
+                LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1,
+                LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3,
+                LINK_TNR1_TOLS, LINK_TNR2_TOLS, LINK_TNR3_TOLS, LINK_TNR4_TOLS,
+            ]
+
+OBJECTS_PER_TOPOLOGY = [
+    (TOPO_ADMIN_ID,
+        [   DEV_DC1GW_ID, DEV_DC2GW_ID,
+            DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
+            DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+            DEV_TOLS_ID,
+        ],
+        [   LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
+            LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
+            LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
+            LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
+        ],
+    ),
+    (TOPO_DC1_ID,
+        [DEV_DC1GW_ID],
+        []),
+    (TOPO_DC2_ID,
+        [DEV_DC2GW_ID],
+        []),
+    (TOPO_CS1_ID,
+        [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
+        []),
+    (TOPO_CS2_ID,
+        [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
+        []),
+    (TOPO_TN_ID,
+        [   DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID,
+            DEV_TOLS_ID,
+        ],
+        [   LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID,
+        ]),
+]
diff --git a/src/tests/ecoc22/tests/Tools.py b/src/tests/ecoc22/tests/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..33205da9baeb6c9fe93a389e9744053aea664b16
--- /dev/null
+++ b/src/tests/ecoc22/tests/Tools.py
@@ -0,0 +1,36 @@
+from typing import Dict, List, Tuple
+from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
+from common.tools.object_factory.Link import json_link, json_link_id
+
+def json_endpoint_ids(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]):
+    return [
+        json_endpoint_id(device_id, ep_uuid, topology_id=None)
+        for ep_uuid, _, _ in endpoint_descriptors
+    ]
+
+def json_endpoints(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]):
+    return [
+        json_endpoint(device_id, ep_uuid, ep_type, topology_id=None, kpi_sample_types=ep_sample_types)
+        for ep_uuid, ep_type, ep_sample_types in endpoint_descriptors
+    ]
+
+def get_link_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str:
+    return '{:s}/{:s}=={:s}/{:s}'.format(
+        a_endpoint_id['device_id']['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'],
+        a_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid'])
+
+def link(a_endpoint_id, z_endpoint_id) -> Tuple[str, Dict, Dict]:
+    link_uuid = get_link_uuid(a_endpoint_id, z_endpoint_id)
+    link_id   = json_link_id(link_uuid)
+    link_data = json_link(link_uuid, [a_endpoint_id, z_endpoint_id])
+    return link_uuid, link_id, link_data
+
+def compose_service_endpoint_id(endpoint_id):
+    device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
+    endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
+    return ':'.join([device_uuid, endpoint_uuid])
+
+def compose_bearer(endpoint_id):
+    device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
+    endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
+    return ':'.join([device_uuid, endpoint_uuid])
diff --git a/src/tests/ecoc22/tests/__init__.py b/src/tests/ecoc22/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/tests/ecoc22/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/ecoc22/tests/test_functional_bootstrap.py b/src/tests/ecoc22/tests/test_functional_bootstrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..14ee21658838b21d989646134f263f7961fc6c11
--- /dev/null
+++ b/src/tests/ecoc22/tests/test_functional_bootstrap.py
@@ -0,0 +1,91 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Topology, TopologyId
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from .Fixtures import context_client, device_client
+#from .Objects_BigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+#from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
+#from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
+from .Objects_DC_CSGW_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
+
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+def test_scenario_empty(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure database is empty -------------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == 0
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 0
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == 0
+
+
+def test_prepare_environment(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient):  # pylint: disable=redefined-outer-name
+
+    for context  in CONTEXTS  : context_client.SetContext (Context (**context ))
+    for topology in TOPOLOGIES: context_client.SetTopology(Topology(**topology))
+
+    for device   in DEVICES   : device_client .AddDevice  (Device  (**device  ))
+    for topology_id, device_ids, _ in OBJECTS_PER_TOPOLOGY:
+        topology = Topology()
+        topology.CopyFrom(context_client.GetTopology(TopologyId(**topology_id)))
+
+        device_ids_in_topology = {device_id.device_uuid.uuid for device_id in topology.device_ids}
+        func_device_id_not_added = lambda device_id: device_id['device_uuid']['uuid'] not in device_ids_in_topology
+        func_device_id_json_to_grpc = lambda device_id: DeviceId(**device_id)
+        device_ids_to_add = list(map(func_device_id_json_to_grpc, filter(func_device_id_not_added, device_ids)))
+        topology.device_ids.extend(device_ids_to_add)
+
+        context_client.SetTopology(topology)
+
+    for link     in LINKS     : context_client.SetLink    (Link    (**link    ))
+    for topology_id, _, link_ids in OBJECTS_PER_TOPOLOGY:
+        topology = Topology()
+        topology.CopyFrom(context_client.GetTopology(TopologyId(**topology_id)))
+
+        link_ids_in_topology = {link_id.link_uuid.uuid for link_id in topology.link_ids}
+        func_link_id_not_added = lambda link_id: link_id['link_uuid']['uuid'] not in link_ids_in_topology
+        func_link_id_json_to_grpc = lambda link_id: LinkId(**link_id)
+        link_ids_to_add = list(map(func_link_id_json_to_grpc, filter(func_link_id_not_added, link_ids)))
+        topology.link_ids.extend(link_ids_to_add)
+
+        context_client.SetTopology(topology)
+
+
+def test_scenario_ready(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == len(CONTEXTS)
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == len(TOPOLOGIES)
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == len(DEVICES)
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == len(LINKS)
+
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    assert len(response.services) == 0
diff --git a/src/tests/ecoc22/tests/test_functional_cleanup.py b/src/tests/ecoc22/tests/test_functional_cleanup.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fc61e818ac5371ea0730ce40db4f69e56324668
--- /dev/null
+++ b/src/tests/ecoc22/tests/test_functional_cleanup.py
@@ -0,0 +1,67 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from .Fixtures import context_client, device_client
+#from .Objects_BigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+#from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+#from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+from .Objects_DC_CSGW_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+def test_services_removed(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure service is removed ------------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == len(CONTEXTS)
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == len(TOPOLOGIES)
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == len(DEVICES)
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == len(LINKS)
+
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    assert len(response.services) == 0
+
+
+def test_scenario_cleanup(
+    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
+
+    for link     in LINKS     : context_client.RemoveLink    (LinkId    (**link    ['link_id'    ]))
+    for device   in DEVICES   : device_client .DeleteDevice  (DeviceId  (**device  ['device_id'  ]))
+    for topology in TOPOLOGIES: context_client.RemoveTopology(TopologyId(**topology['topology_id']))
+    for context  in CONTEXTS  : context_client.RemoveContext (ContextId (**context ['context_id' ]))
+
+
+def test_scenario_empty_again(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure database is empty again -------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == 0
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 0
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == 0
diff --git a/src/tests/ecoc22/tests/test_functional_create_service.py b/src/tests/ecoc22/tests/test_functional_create_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f576db836b6868dbc8617c1e81686b4f6ee5093
--- /dev/null
+++ b/src/tests/ecoc22/tests/test_functional_create_service.py
@@ -0,0 +1,84 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from compute.tests.mock_osm.MockOSM import MockOSM
+from context.client.ContextClient import ContextClient
+from .Fixtures import context_client, osm_wim
+#from .Objects_BigNet import (
+#    CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
+#from .Objects_DC_CSGW_TN import (
+#    CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
+#from .Objects_DC_CSGW_TN_OLS import (
+#    CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
+from .Objects_DC_CSGW_OLS import (
+    CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
+
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure links are created -------------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == len(CONTEXTS)
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == len(TOPOLOGIES)
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == len(DEVICES)
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == len(LINKS)
+
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    assert len(response.services) == 0
+
+
+def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
+    # ----- Create Service ---------------------------------------------------------------------------------------------
+    service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
+    osm_wim.get_connectivity_service_status(service_uuid)
+
+
+def test_scenario_service_created(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure service is created ------------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == len(CONTEXTS)
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == len(TOPOLOGIES)
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == len(DEVICES)
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == len(LINKS)
+
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+    assert len(response.services) == 3 # 1xL2NM + 2xTAPI
+    for service in response.services:
+        service_id = service.service_id
+        response = context_client.ListConnections(service_id)
+        LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+            grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
+        if service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM:
+            assert len(response.connections) == 2 # 2 connections per service (primary + backup)
+        elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
+            assert len(response.connections) == 1 # 1 connection per service
diff --git a/src/tests/ecoc22/tests/test_functional_delete_service.py b/src/tests/ecoc22/tests/test_functional_delete_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..89d7a621fb21a7c26da8bd64269ca1e02ececebb
--- /dev/null
+++ b/src/tests/ecoc22/tests/test_functional_delete_service.py
@@ -0,0 +1,102 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, pytest
+from common.DeviceTypes import DeviceTypeEnum
+from common.Settings import get_setting
+from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
+from common.tests.EventTools import EVENT_REMOVE, EVENT_UPDATE, check_events
+from common.tools.object_factory.Connection import json_connection_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Service import json_service_id
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from compute.tests.mock_osm.MockOSM import MockOSM
+from context.client.ContextClient import ContextClient
+from .Fixtures import context_client, osm_wim
+#from .Objects_BigNet import (
+#    CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
+#from .Objects_DC_CSGW_TN import (
+#    CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
+#from .Objects_DC_CSGW_TN_OLS import (
+#    CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
+from .Objects_DC_CSGW_OLS import (
+    CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+DEVTYPE_EMU_PR  = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
+DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value
+
+def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure service is created ------------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == len(CONTEXTS)
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == len(TOPOLOGIES)
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == len(DEVICES)
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == len(LINKS)
+
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+    assert len(response.services) == 3 # 1xL2NM + 2xTAPI
+    for service in response.services:
+        service_id = service.service_id
+        response = context_client.ListConnections(service_id)
+        LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+            grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
+        if service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM:
+            assert len(response.connections) == 2 # 2 connections per service (primary + backup)
+        elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
+            assert len(response.connections) == 1 # 1 connection per service
+
+
+def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
+    # ----- Delete Service ---------------------------------------------------------------------------------------------
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+    service_uuids = set()
+    for service in response.services:
+        if service.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: continue
+        service_uuid = service.service_id.service_uuid.uuid
+        service_uuids.add(service_uuid)
+        osm_wim.conn_info[service_uuid] = {}
+
+    assert len(service_uuids) == 1  # assume a single service has been created
+    service_uuid = set(service_uuids).pop()
+
+    osm_wim.delete_connectivity_service(service_uuid)
+
+
+def test_services_removed(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure service is removed ------------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == len(CONTEXTS)
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == len(TOPOLOGIES)
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == len(DEVICES)
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == len(LINKS)
+
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    assert len(response.services) == 0
diff --git a/src/tests/oeccpsc22/tests/Tools.py b/src/tests/oeccpsc22/tests/Tools.py
index a782b6bb3e541e4331f5f95164e69def5640f556..d26c8ae11468f05dc48cb55dc202b9f0efc1d3b6 100644
--- a/src/tests/oeccpsc22/tests/Tools.py
+++ b/src/tests/oeccpsc22/tests/Tools.py
@@ -12,7 +12,7 @@ def json_endpoint_ids(device_id : Dict, endpoint_descriptors : List[Tuple[str, s
 def get_link_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str:
     return '{:s}/{:s}=={:s}/{:s}'.format(
         a_endpoint_id['device_id']['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'],
-        a_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid'])
+        z_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid'])
 
 def compose_service_endpoint_id(endpoint_id):
     device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8afd683843d4882e75c3cbca8363aa3d63edda7f
--- /dev/null
+++ b/src/tests/ofc22/deploy_specs.sh
@@ -0,0 +1,17 @@
+# Set the URL of your local Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Set the neew Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
diff --git a/src/tests/ofc22/run_test_02_create_service.sh b/src/tests/ofc22/run_test_02_create_service.sh
index 20fc3db65dd57ae8697253443050b1767d9b77a1..8b6c8658df759bdcb777f83c6c7846d0ea7b48ed 100755
--- a/src/tests/ofc22/run_test_02_create_service.sh
+++ b/src/tests/ofc22/run_test_02_create_service.sh
@@ -13,4 +13,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+source tfs_runtime_env_vars.sh
 pytest --verbose src/tests/ofc22/tests/test_functional_create_service.py
diff --git a/src/tests/ofc22/run_test_03_delete_service.sh b/src/tests/ofc22/run_test_03_delete_service.sh
index 98073013d84e9d64e56dd9022ac163b6321ce389..51df41aee216e141b0d2e2f55a0398ecd9cdf35f 100755
--- a/src/tests/ofc22/run_test_03_delete_service.sh
+++ b/src/tests/ofc22/run_test_03_delete_service.sh
@@ -13,4 +13,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+source tfs_runtime_env_vars.sh
 pytest --verbose src/tests/ofc22/tests/test_functional_delete_service.py
diff --git a/src/tests/ofc22/run_test_04_cleanup.sh b/src/tests/ofc22/run_test_04_cleanup.sh
index f7c0aad8da0b0446d188ec1fad3f0fc0e7dc2b4a..2ba91684f9eb49075dd68877e54976f989811ae9 100755
--- a/src/tests/ofc22/run_test_04_cleanup.sh
+++ b/src/tests/ofc22/run_test_04_cleanup.sh
@@ -13,4 +13,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+source tfs_runtime_env_vars.sh
 pytest --verbose src/tests/ofc22/tests/test_functional_cleanup.py
diff --git a/src/tests/ofc22/run_tests_and_coverage.sh b/src/tests/ofc22/run_tests_and_coverage.sh
index fa5026db2310c8753d8e4476707ce46a38ecb0f2..bafc920c71a640d083497e1cd6ae025d0ea7cef5 100755
--- a/src/tests/ofc22/run_tests_and_coverage.sh
+++ b/src/tests/ofc22/run_tests_and_coverage.sh
@@ -29,6 +29,8 @@ rm -f $COVERAGEFILE
 # Force a flush of Context database
 kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL
 
+source tfs_runtime_env_vars.sh
+
 # Run functional tests and analyze code coverage at the same time
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     tests/ofc22/tests/test_functional_bootstrap.py
diff --git a/src/tests/ofc22/tests/test_functional_bootstrap.py b/src/tests/ofc22/tests/test_functional_bootstrap.py
index 56231512d49542f4cca4a0850767409d340d6852..3ea9393c5e7f575b24a7fd0ec2f5de929900d066 100644
--- a/src/tests/ofc22/tests/test_functional_bootstrap.py
+++ b/src/tests/ofc22/tests/test_functional_bootstrap.py
@@ -59,10 +59,10 @@ def test_scenario_empty(context_client : ContextClient):  # pylint: disable=rede
 def test_prepare_scenario(context_client : ContextClient):  # pylint: disable=redefined-outer-name
 
     # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client)
-    events_collector.start()
+    #events_collector = EventsCollector(context_client)
+    #events_collector.start()
 
-    expected_events = []
+    #expected_events = []
 
     # ----- Create Contexts and Topologies -----------------------------------------------------------------------------
     for context in CONTEXTS:
@@ -70,7 +70,7 @@ def test_prepare_scenario(context_client : ContextClient):  # pylint: disable=re
         LOGGER.info('Adding Context {:s}'.format(context_uuid))
         response = context_client.SetContext(Context(**context))
         assert response.context_uuid.uuid == context_uuid
-        expected_events.append(('ContextEvent', EVENT_CREATE, json_context_id(context_uuid)))
+        #expected_events.append(('ContextEvent', EVENT_CREATE, json_context_id(context_uuid)))
 
     for topology in TOPOLOGIES:
         context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid']
@@ -80,13 +80,13 @@ def test_prepare_scenario(context_client : ContextClient):  # pylint: disable=re
         assert response.context_id.context_uuid.uuid == context_uuid
         assert response.topology_uuid.uuid == topology_uuid
         context_id = json_context_id(context_uuid)
-        expected_events.append(('TopologyEvent', EVENT_CREATE, json_topology_id(topology_uuid, context_id=context_id)))
+        #expected_events.append(('TopologyEvent', EVENT_CREATE, json_topology_id(topology_uuid, context_id=context_id)))
 
     # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    check_events(events_collector, expected_events)
+    #check_events(events_collector, expected_events)
 
     # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
+    #events_collector.stop()
 
 
 def test_scenario_ready(context_client : ContextClient):  # pylint: disable=redefined-outer-name
@@ -111,10 +111,10 @@ def test_devices_bootstraping(
     context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
 
     # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client, log_events_received=True)
-    events_collector.start()
+    #events_collector = EventsCollector(context_client, log_events_received=True)
+    #events_collector.start()
 
-    expected_events = []
+    #expected_events = []
 
     # ----- Create Devices and Validate Collected Events ---------------------------------------------------------------
     for device, connect_rules in DEVICES:
@@ -126,11 +126,11 @@ def test_devices_bootstraping(
         response = device_client.AddDevice(Device(**device_with_connect_rules))
         assert response.device_uuid.uuid == device_uuid
 
-        expected_events.extend([
-            # Device creation, update for automation to start the device
-            ('DeviceEvent', EVENT_CREATE, json_device_id(device_uuid)),
-            #('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)),
-        ])
+        #expected_events.extend([
+        #    # Device creation, update for automation to start the device
+        #    ('DeviceEvent', EVENT_CREATE, json_device_id(device_uuid)),
+        #    #('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)),
+        #])
 
         #response = context_client.GetDevice(response)
         #for endpoint in response.device_endpoints:
@@ -139,10 +139,10 @@ def test_devices_bootstraping(
         #        expected_events.append(('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)))
 
     # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    check_events(events_collector, expected_events)
+    #check_events(events_collector, expected_events)
 
     # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
+    #events_collector.stop()
 
 
 def test_devices_bootstrapped(context_client : ContextClient):  # pylint: disable=redefined-outer-name
@@ -166,10 +166,10 @@ def test_devices_bootstrapped(context_client : ContextClient):  # pylint: disabl
 def test_links_creation(context_client : ContextClient):  # pylint: disable=redefined-outer-name
 
     # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client)
-    events_collector.start()
+    #events_collector = EventsCollector(context_client)
+    #events_collector.start()
 
-    expected_events = []
+    #expected_events = []
 
     # ----- Create Links and Validate Collected Events -----------------------------------------------------------------
     for link in LINKS:
@@ -177,13 +177,13 @@ def test_links_creation(context_client : ContextClient):  # pylint: disable=rede
         LOGGER.info('Adding Link {:s}'.format(link_uuid))
         response = context_client.SetLink(Link(**link))
         assert response.link_uuid.uuid == link_uuid
-        expected_events.append(('LinkEvent', EVENT_CREATE, json_link_id(link_uuid)))
+        #expected_events.append(('LinkEvent', EVENT_CREATE, json_link_id(link_uuid)))
 
     # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    check_events(events_collector, expected_events)
+    #check_events(events_collector, expected_events)
 
     # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
+    #events_collector.stop()
 
 
 def test_links_created(context_client : ContextClient):  # pylint: disable=redefined-outer-name
diff --git a/src/tests/ofc22/tests/test_functional_cleanup.py b/src/tests/ofc22/tests/test_functional_cleanup.py
index 6c3a79dfd2e1e46d5c3e4b2d5e33f0ae42decd00..60bb86b50853680e0699906dcb28ebd2e8777bb4 100644
--- a/src/tests/ofc22/tests/test_functional_cleanup.py
+++ b/src/tests/ofc22/tests/test_functional_cleanup.py
@@ -65,10 +65,10 @@ def test_scenario_cleanup(
     context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
 
     # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client)
-    events_collector.start()
+    #events_collector = EventsCollector(context_client)
+    #events_collector.start()
 
-    expected_events = []
+    #expected_events = []
 
     # ----- Delete Links and Validate Collected Events -----------------------------------------------------------------
     for link in LINKS:
@@ -76,7 +76,7 @@ def test_scenario_cleanup(
         link_uuid = link_id['link_uuid']['uuid']
         LOGGER.info('Deleting Link {:s}'.format(link_uuid))
         context_client.RemoveLink(LinkId(**link_id))
-        expected_events.append(('LinkEvent', EVENT_REMOVE, json_link_id(link_uuid)))
+        #expected_events.append(('LinkEvent', EVENT_REMOVE, json_link_id(link_uuid)))
 
     # ----- Delete Devices and Validate Collected Events ---------------------------------------------------------------
     for device, _ in DEVICES:
@@ -84,7 +84,7 @@ def test_scenario_cleanup(
         device_uuid = device_id['device_uuid']['uuid']
         LOGGER.info('Deleting Device {:s}'.format(device_uuid))
         device_client.DeleteDevice(DeviceId(**device_id))
-        expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid)))
+        #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid)))
 
     # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------
     for topology in TOPOLOGIES:
@@ -94,7 +94,7 @@ def test_scenario_cleanup(
         LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
         context_client.RemoveTopology(TopologyId(**topology_id))
         context_id = json_context_id(context_uuid)
-        expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id)))
+        #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id)))
 
     # ----- Delete Contexts and Validate Collected Events --------------------------------------------------------------
     for context in CONTEXTS:
@@ -102,13 +102,13 @@ def test_scenario_cleanup(
         context_uuid = context_id['context_uuid']['uuid']
         LOGGER.info('Deleting Context {:s}'.format(context_uuid))
         context_client.RemoveContext(ContextId(**context_id))
-        expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid)))
+        #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid)))
 
     # ----- Validate Collected Events ----------------------------------------------------------------------------------
-    check_events(events_collector, expected_events)
+    #check_events(events_collector, expected_events)
 
     # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
+    #events_collector.stop()
 
 
 def test_scenario_empty_again(context_client : ContextClient):  # pylint: disable=redefined-outer-name
diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py
index a630501815194deb5f49a07bd9f7e6c1b5d03dcd..a12fdb18bb849cac60df958d2178c1982bc7d5d8 100644
--- a/src/tests/ofc22/tests/test_functional_create_service.py
+++ b/src/tests/ofc22/tests/test_functional_create_service.py
@@ -69,8 +69,8 @@ def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable
 
 def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
     # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client, log_events_received=True)
-    events_collector.start()
+    #events_collector = EventsCollector(context_client, log_events_received=True)
+    #events_collector.start()
 
     # ----- Create Service ---------------------------------------------------------------------------------------------
     service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
@@ -78,30 +78,30 @@ def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): #
 
     # ----- Validate collected events ----------------------------------------------------------------------------------
 
-    packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
-    optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
-    optical_service_uuid = '{:s}:optical'.format(service_uuid)
-
-    expected_events = [
-        # Create packet service and add first endpoint
-        ('ServiceEvent',    EVENT_CREATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-        ('ServiceEvent',    EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-
-        # Configure OLS controller, create optical service, create optical connection
-        ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
-        ('ServiceEvent',    EVENT_CREATE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
-        ('ConnectionEvent', EVENT_CREATE, json_connection_id(optical_connection_uuid)),
-
-        # Configure endpoint packet devices, add second endpoint to service, create connection
-        ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
-        ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
-        ('ServiceEvent',    EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-        ('ConnectionEvent', EVENT_CREATE, json_connection_id(packet_connection_uuid)),
-    ]
-    check_events(events_collector, expected_events)
+    #packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
+    #optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
+    #optical_service_uuid = '{:s}:optical'.format(service_uuid)
+
+    #expected_events = [
+    #    # Create packet service and add first endpoint
+    #    ('ServiceEvent',    EVENT_CREATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
+    #    ('ServiceEvent',    EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
+    #
+    #    # Configure OLS controller, create optical service, create optical connection
+    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
+    #    ('ServiceEvent',    EVENT_CREATE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
+    #    ('ConnectionEvent', EVENT_CREATE, json_connection_id(optical_connection_uuid)),
+    #
+    #    # Configure endpoint packet devices, add second endpoint to service, create connection
+    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
+    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
+    #    ('ServiceEvent',    EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
+    #    ('ConnectionEvent', EVENT_CREATE, json_connection_id(packet_connection_uuid)),
+    #]
+    #check_events(events_collector, expected_events)
 
     # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
+    #events_collector.stop()
 
 
 def test_scenario_service_created(context_client : ContextClient):  # pylint: disable=redefined-outer-name
diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py
index 222dee5adc0839df9b9a6cac1dcdd08ecb2ec195..19a7ecd8d0a5235a54a4d1378dd50b1a37cbd46b 100644
--- a/src/tests/ofc22/tests/test_functional_delete_service.py
+++ b/src/tests/ofc22/tests/test_functional_delete_service.py
@@ -23,7 +23,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string
 from compute.tests.mock_osm.MockOSM import MockOSM
 from context.client.ContextClient import ContextClient
 from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import ContextId, Empty
+from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
 from .Objects import (
     CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING,
     WIM_PASSWORD, WIM_USERNAME)
@@ -77,43 +77,43 @@ def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable
 
 def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
     # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client, log_events_received=True)
-    events_collector.start()
+    #events_collector = EventsCollector(context_client, log_events_received=True)
+    #events_collector.start()
 
     # ----- Delete Service ---------------------------------------------------------------------------------------------
-    response = context_client.ListServiceIds(ContextId(**CONTEXT_ID))
-    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.service_ids), grpc_message_to_json_string(response)))
-    assert len(response.service_ids) == 2 # L3NM + TAPI
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+    assert len(response.services) == 2 # L3NM + TAPI
     service_uuids = set()
-    for service_id in response.service_ids:
-        service_uuid = service_id.service_uuid.uuid
-        if service_uuid.endswith(':optical'): continue
+    for service in response.services:
+        if service.service_type != ServiceTypeEnum.SERVICETYPE_L3NM: continue
+        service_uuid = service.service_id.service_uuid.uuid
         service_uuids.add(service_uuid)
         osm_wim.conn_info[service_uuid] = {}
 
-    assert len(service_uuids) == 1  # assume a single service has been created
+    assert len(service_uuids) == 1  # assume a single L3NM service has been created
     service_uuid = set(service_uuids).pop()
 
     osm_wim.delete_connectivity_service(service_uuid)
 
     # ----- Validate collected events ----------------------------------------------------------------------------------
-    packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
-    optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
-    optical_service_uuid = '{:s}:optical'.format(service_uuid)
-
-    expected_events = [
-        ('ConnectionEvent', EVENT_REMOVE, json_connection_id(packet_connection_uuid)),
-        ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
-        ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
-        ('ServiceEvent',    EVENT_REMOVE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-        ('ConnectionEvent', EVENT_REMOVE, json_connection_id(optical_connection_uuid)),
-        ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
-        ('ServiceEvent',    EVENT_REMOVE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
-    ]
-    check_events(events_collector, expected_events)
+    #packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
+    #optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
+    #optical_service_uuid = '{:s}:optical'.format(service_uuid)
+
+    #expected_events = [
+    #    ('ConnectionEvent', EVENT_REMOVE, json_connection_id(packet_connection_uuid)),
+    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
+    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
+    #    ('ServiceEvent',    EVENT_REMOVE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
+    #    ('ConnectionEvent', EVENT_REMOVE, json_connection_id(optical_connection_uuid)),
+    #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
+    #    ('ServiceEvent',    EVENT_REMOVE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
+    #]
+    #check_events(events_collector, expected_events)
 
     # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
+    #events_collector.stop()
 
 
 def test_services_removed(context_client : ContextClient):  # pylint: disable=redefined-outer-name
diff --git a/src/webui/Dockerfile b/src/webui/Dockerfile
index 7760416be32b893ed5f2408b70e874fb89721e17..a17d2bd9aea9c6948262dcf17776f75c0be351b8 100644
--- a/src/webui/Dockerfile
+++ b/src/webui/Dockerfile
@@ -79,6 +79,8 @@ COPY --chown=webui:webui src/device/__init__.py device/__init__.py
 COPY --chown=webui:webui src/device/client/. device/client/
 COPY --chown=webui:webui src/service/__init__.py service/__init__.py
 COPY --chown=webui:webui src/service/client/. service/client/
+COPY --chown=webui:webui src/slice/__init__.py slice/__init__.py
+COPY --chown=webui:webui src/slice/client/. slice/client/
 COPY --chown=webui:webui src/webui/. webui/
 
 # Start the service
diff --git a/src/webui/grafana_backup_dashboard.json b/src/webui/grafana_backup_dashboard.json
new file mode 100644
index 0000000000000000000000000000000000000000..58a856a6c50de422b1f6bde1e2799d53762db916
--- /dev/null
+++ b/src/webui/grafana_backup_dashboard.json
@@ -0,0 +1,320 @@
+{
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": "-- Grafana --",
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "target": {
+            "limit": 100,
+            "matchAny": false,
+            "tags": [],
+            "type": "dashboard"
+          },
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "fiscalYearStartMonth": 0,
+    "gnetId": null,
+    "graphTooltip": 0,
+    "id": 1,
+    "iteration": 1664282779131,
+    "links": [],
+    "liveNow": false,
+    "panels": [
+      {
+        "datasource": null,
+        "fieldConfig": {
+          "defaults": {
+            "color": {
+              "mode": "palette-classic"
+            },
+            "custom": {
+              "axisLabel": "",
+              "axisPlacement": "auto",
+              "barAlignment": 0,
+              "drawStyle": "line",
+              "fillOpacity": 0,
+              "gradientMode": "none",
+              "hideFrom": {
+                "legend": false,
+                "tooltip": false,
+                "viz": false
+              },
+              "lineInterpolation": "smooth",
+              "lineWidth": 1,
+              "pointSize": 5,
+              "scaleDistribution": {
+                "type": "linear"
+              },
+              "showPoints": "never",
+              "spanNulls": false,
+              "stacking": {
+                "group": "A",
+                "mode": "none"
+              },
+              "thresholdsStyle": {
+                "mode": "off"
+              }
+            },
+            "mappings": [],
+            "thresholds": {
+              "mode": "absolute",
+              "steps": [
+                {
+                  "color": "green",
+                  "value": null
+                },
+                {
+                  "color": "red",
+                  "value": 80
+                }
+              ]
+            }
+          },
+          "overrides": [
+            {
+              "matcher": {
+                "id": "byRegexp",
+                "options": ".* PACKETS_.*"
+              },
+              "properties": [
+                {
+                  "id": "custom.axisPlacement",
+                  "value": "left"
+                },
+                {
+                  "id": "unit",
+                  "value": "pps"
+                },
+                {
+                  "id": "custom.axisLabel",
+                  "value": "Packets / sec"
+                },
+                {
+                  "id": "custom.axisSoftMin",
+                  "value": 0
+                }
+              ]
+            },
+            {
+              "matcher": {
+                "id": "byRegexp",
+                "options": ".* BYTES_.*"
+              },
+              "properties": [
+                {
+                  "id": "custom.axisPlacement",
+                  "value": "right"
+                },
+                {
+                  "id": "unit",
+                  "value": "Bps"
+                },
+                {
+                  "id": "custom.axisLabel",
+                  "value": "Bytes / sec"
+                },
+                {
+                  "id": "custom.axisSoftMin",
+                  "value": 0
+                }
+              ]
+            }
+          ]
+        },
+        "gridPos": {
+          "h": 19,
+          "w": 24,
+          "x": 0,
+          "y": 0
+        },
+        "id": 2,
+        "options": {
+          "legend": {
+            "calcs": [
+              "first",
+              "min",
+              "mean",
+              "max",
+              "lastNotNull"
+            ],
+            "displayMode": "table",
+            "placement": "right"
+          },
+          "tooltip": {
+            "mode": "multi"
+          }
+        },
+        "targets": [
+          {
+            "format": "time_series",
+            "group": [],
+            "hide": false,
+            "metricColumn": "kpi_value",
+            "rawQuery": false,
+            "rawSql": "SELECT\n  timestamp AS \"time\",\n  kpi_value AS metric,\n  kpi_value AS \"kpi_value\"\nFROM monitoring\nWHERE\n  $__timeFilter(timestamp) AND\n  device_id = $device_id AND\n  endpoint_id = $endpoint_id\nORDER BY 1,2",
+            "refId": "A",
+            "select": [
+              [
+                {
+                  "params": [
+                    "kpi_value"
+                  ],
+                  "type": "column"
+                },
+                {
+                  "params": [
+                    "avg"
+                  ],
+                  "type": "aggregate"
+                },
+                {
+                  "params": [
+                    "kpi_value"
+                  ],
+                  "type": "alias"
+                }
+              ]
+            ],
+            "table": "monitoring",
+            "timeColumn": "timestamp",
+            "where": [
+              {
+                "name": "$__timeFilter",
+                "params": [],
+                "type": "macro"
+              },
+              {
+                "name": "",
+                "params": [
+                  "device_id",
+                  "=",
+                  "$device_id"
+                ],
+                "type": "expression"
+              },
+              {
+                "name": "",
+                "params": [
+                  "endpoint_id",
+                  "=",
+                  "$endpoint_id"
+                ],
+                "type": "expression"
+              }
+            ]
+          }
+        ],
+        "title": "L3 Monitoring Packets/Bytes Received/Sent",
+        "transformations": [],
+        "type": "timeseries"
+      }
+    ],
+    "refresh": "",
+    "schemaVersion": 32,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": null,
+          "current": {
+            "selected": true,
+            "text": [
+              "R1-EMU"
+            ],
+            "value": [
+              "R1-EMU"
+            ]
+          },
+          "datasource": null,
+          "definition": "SELECT DISTINCT device_id FROM monitoring;",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": true,
+          "label": "Device",
+          "multi": true,
+          "name": "device_id",
+          "options": [],
+          "query": "SELECT DISTINCT device_id FROM monitoring;",
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "type": "query"
+        },
+        {
+          "allValue": null,
+          "current": {
+            "selected": true,
+            "text": [
+              "13/1/2"
+            ],
+            "value": [
+              "13/1/2"
+            ]
+          },
+          "datasource": null,
+          "definition": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": true,
+          "label": "EndPoint",
+          "multi": true,
+          "name": "endpoint_id",
+          "options": [],
+          "query": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "type": "query"
+        },
+        {
+          "allValue": null,
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": null,
+          "definition": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
+          "description": null,
+          "error": null,
+          "hide": 0,
+          "includeAll": true,
+          "label": "Kpi Sample Type",
+          "multi": true,
+          "name": "kpi_sample_type",
+          "options": [],
+          "query": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "type": "query"
+        }
+      ]
+    },
+    "time": {
+      "from": "now-5m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "L3 Monitoring",
+    "uid": "tf-l3-monit",
+    "version": 3
+  }
\ No newline at end of file
diff --git a/src/webui/grafana_dashboard.json b/src/webui/grafana_dashboard.json
index a845ac20c7861b86fd1931452b7802b3f1e57aa8..49148825a973aecca5901ffac2249fed6057f4d0 100644
--- a/src/webui/grafana_dashboard.json
+++ b/src/webui/grafana_dashboard.json
@@ -193,19 +193,19 @@
             "tags": [
               {
                 "key": "device_id",
-                "operator": "=~",
+                "operator": "=",
                 "value": "/^$device_id$/"
               },
               {
                 "condition": "AND",
                 "key": "endpoint_id",
-                "operator": "=~",
+                "operator": "=",
                 "value": "/^$endpoint_id$/"
               },
               {
                 "condition": "AND",
                 "key": "kpi_sample_type",
-                "operator": "=~",
+                "operator": "=",
                 "value": "/^$kpi_sample_type$/"
               }
             ]
@@ -236,7 +236,7 @@
             ]
           },
           "datasource": null,
-          "definition": "SHOW TAG VALUES FROM samples WITH KEY=\"device_id\"",
+          "definition": "SELECT DISTINCT device_id FROM monitoring;",
           "description": null,
           "error": null,
           "hide": 0,
@@ -245,7 +245,7 @@
           "multi": true,
           "name": "device_id",
           "options": [],
-          "query": "SHOW TAG VALUES FROM samples WITH KEY=\"device_id\"",
+          "query": "SELECT DISTINCT device_id FROM monitoring;",
           "refresh": 2,
           "regex": "",
           "skipUrlSync": false,
@@ -264,7 +264,7 @@
             ]
           },
           "datasource": null,
-          "definition": "SHOW TAG VALUES FROM samples WITH KEY=\"endpoint_id\" WHERE \"device_id\"=~/^$device_id$/",
+          "definition": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
           "description": null,
           "error": null,
           "hide": 0,
@@ -273,7 +273,7 @@
           "multi": true,
           "name": "endpoint_id",
           "options": [],
-          "query": "SHOW TAG VALUES FROM samples WITH KEY=\"endpoint_id\" WHERE \"device_id\"=~/^$device_id$/",
+          "query": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
           "refresh": 2,
           "regex": "",
           "skipUrlSync": false,
@@ -292,7 +292,7 @@
             ]
           },
           "datasource": null,
-          "definition": "SHOW TAG VALUES FROM samples WITH KEY=\"kpi_sample_type\"",
+          "definition": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
           "description": null,
           "error": null,
           "hide": 0,
@@ -301,7 +301,7 @@
           "multi": true,
           "name": "kpi_sample_type",
           "options": [],
-          "query": "SHOW TAG VALUES FROM samples WITH KEY=\"kpi_sample_type\"",
+          "query": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
           "refresh": 2,
           "regex": "",
           "skipUrlSync": false,
diff --git a/src/webui/grafana_dashboard_psql.json b/src/webui/grafana_dashboard_psql.json
new file mode 100644
index 0000000000000000000000000000000000000000..aa2676e26a0336c8279a658dbbdabaafa9c6b4d0
--- /dev/null
+++ b/src/webui/grafana_dashboard_psql.json
@@ -0,0 +1,313 @@
+{"overwrite": true, "folderId": 0, "dashboard":
+  {
+    "id": null,
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": {
+            "type": "datasource",
+            "uid": "grafana"
+          },
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "target": {
+            "limit": 100,
+            "matchAny": false,
+            "tags": [],
+            "type": "dashboard"
+          },
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "fiscalYearStartMonth": 0,
+    "graphTooltip": 0,
+    "iteration": 1664814762635,
+    "links": [],
+    "liveNow": false,
+    "panels": [
+      {
+        "datasource": {
+          "type": "postgres",
+          "uid": "monitoringdb"
+        },
+        "fieldConfig": {
+          "defaults": {
+            "color": {
+              "mode": "palette-classic"
+            },
+            "custom": {
+              "axisLabel": "",
+              "axisPlacement": "auto",
+              "barAlignment": 0,
+              "drawStyle": "line",
+              "fillOpacity": 0,
+              "gradientMode": "none",
+              "hideFrom": {
+                "legend": false,
+                "tooltip": false,
+                "viz": false
+              },
+              "lineInterpolation": "smooth",
+              "lineWidth": 1,
+              "pointSize": 5,
+              "scaleDistribution": {
+                "type": "linear"
+              },
+              "showPoints": "always",
+              "spanNulls": true,
+              "stacking": {
+                "group": "A",
+                "mode": "none"
+              },
+              "thresholdsStyle": {
+                "mode": "off"
+              }
+            },
+            "mappings": [],
+            "thresholds": {
+              "mode": "absolute",
+              "steps": [
+                {
+                  "color": "green",
+                  "value": null
+                },
+                {
+                  "color": "red",
+                  "value": 80
+                }
+              ]
+            }
+          },
+          "overrides": [
+            {
+              "matcher": {
+                "id": "byRegexp",
+                "options": ".*PACKETS_.*"
+              },
+              "properties": [
+                {
+                  "id": "custom.axisPlacement",
+                  "value": "left"
+                },
+                {
+                  "id": "unit",
+                  "value": "pps"
+                },
+                {
+                  "id": "custom.axisLabel",
+                  "value": "Packets / sec"
+                },
+                {
+                  "id": "custom.axisSoftMin",
+                  "value": 0
+                }
+              ]
+            },
+            {
+              "matcher": {
+                "id": "byRegexp",
+                "options": ".*BYTES_.*"
+              },
+              "properties": [
+                {
+                  "id": "custom.axisPlacement",
+                  "value": "right"
+                },
+                {
+                  "id": "unit",
+                  "value": "Bps"
+                },
+                {
+                  "id": "custom.axisLabel",
+                  "value": "Bytes / sec"
+                },
+                {
+                  "id": "custom.axisSoftMin",
+                  "value": 0
+                }
+              ]
+            }
+          ]
+        },
+        "gridPos": {
+          "h": 19,
+          "w": 24,
+          "x": 0,
+          "y": 0
+        },
+        "id": 2,
+        "options": {
+          "legend": {
+            "calcs": [
+              "first",
+              "min",
+              "mean",
+              "max",
+              "lastNotNull"
+            ],
+            "displayMode": "table",
+            "placement": "right"
+          },
+          "tooltip": {
+            "mode": "multi",
+            "sort": "none"
+          }
+        },
+        "targets": [
+          {
+            "datasource": {
+              "type": "postgres",
+              "uid": "monitoringdb"
+            },
+            "format": "time_series",
+            "group": [],
+            "hide": false,
+            "metricColumn": "kpi_value",
+            "rawQuery": true,
+            "rawSql": "SELECT\r\n  $__time(timestamp), kpi_value AS metric, device_id, endpoint_id, kpi_sample_type\r\nFROM\r\n  monitoring\r\nWHERE\r\n  $__timeFilter(timestamp) AND device_id IN ($device_id) AND endpoint_id IN ($endpoint_id) AND kpi_sample_type IN ($kpi_sample_type)\r\nGROUP BY\r\n  device_id, endpoint_id, kpi_sample_type\r\nORDER BY\r\n  timestamp\r\n",
+            "refId": "A",
+            "select": [
+              [
+                {
+                  "params": [
+                    "kpi_value"
+                  ],
+                  "type": "column"
+                }
+              ]
+            ],
+            "table": "monitoring",
+            "timeColumn": "timestamp",
+            "where": [
+              {
+                "name": "",
+                "params": [
+                  "device_id",
+                  "IN",
+                  "$device_id"
+                ],
+                "type": "expression"
+              }
+            ]
+          }
+        ],
+        "title": "L3 Monitoring Packets/Bytes Received/Sent",
+        "transformations": [
+          {
+            "id": "renameByRegex",
+            "options": {
+              "regex": "metric {device_id=\\\"([^\\\"]+)\\\", endpoint_id=\\\"([^\\\"]+)\\\", kpi_sample_type=\\\"([^\\\"]+)\\\"}",
+              "renamePattern": "$3 ($1 $2)"
+            }
+          }
+        ],
+        "type": "timeseries"
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 36,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": {
+            "type": "postgres",
+            "uid": "monitoringdb"
+          },
+          "definition": "SELECT DISTINCT device_id FROM monitoring;",
+          "hide": 0,
+          "includeAll": true,
+          "label": "Device",
+          "multi": true,
+          "name": "device_id",
+          "options": [],
+          "query": "SELECT DISTINCT device_id FROM monitoring;",
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "type": "query"
+        },
+        {
+          "current": {
+            "selected": false,
+            "text": "All",
+            "value": "$__all"
+          },
+          "datasource": {
+            "type": "postgres",
+            "uid": "monitoringdb"
+          },
+          "definition": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
+          "hide": 0,
+          "includeAll": true,
+          "label": "EndPoint",
+          "multi": true,
+          "name": "endpoint_id",
+          "options": [],
+          "query": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "type": "query"
+        },
+        {
+          "current": {
+            "selected": true,
+            "text": [
+              "PACKETS_RECEIVED",
+              "PACKETS_TRANSMITTED"
+            ],
+            "value": [
+              "PACKETS_RECEIVED",
+              "PACKETS_TRANSMITTED"
+            ]
+          },
+          "datasource": {
+            "type": "postgres",
+            "uid": "monitoringdb"
+          },
+          "definition": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
+          "hide": 0,
+          "includeAll": true,
+          "label": "Kpi Sample Type",
+          "multi": true,
+          "name": "kpi_sample_type",
+          "options": [],
+          "query": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "type": "query"
+        }
+      ]
+    },
+    "time": {
+      "from": "now-15m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "L3 Monitoring",
+    "uid": "tf-l3-monit",
+    "version": 1,
+    "weekStart": ""
+  }
+}
diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py
index 9187d90e76acd256bcac752ce7e7be025889e133..75e1036420d0bc88a790fb7b65f4f4900abaaadd 100644
--- a/src/webui/service/__init__.py
+++ b/src/webui/service/__init__.py
@@ -72,11 +72,15 @@ def create_app(use_config=None, web_app_root=None):
     from webui.service.service.routes import service
     app.register_blueprint(service)
 
+    from webui.service.slice.routes import slice
+    app.register_blueprint(slice)
+
     from webui.service.device.routes import device
     app.register_blueprint(device)
 
     from webui.service.link.routes import link
     app.register_blueprint(link)
+    
 
     app.jinja_env.filters['from_json'] = from_json
     
diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py
index 04c4b1de59283832b17c92c91727fa716a2c0fea..51e903d9ec28c5aaac20cd49e2f97dd7044e12bf 100644
--- a/src/webui/service/link/routes.py
+++ b/src/webui/service/link/routes.py
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from flask import render_template, Blueprint, flash, session, redirect, url_for
-from common.proto.context_pb2 import Empty, LinkList
+
+from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for
+from common.proto.context_pb2 import Empty, Link, LinkEvent, LinkId, LinkIdList, LinkList, DeviceId
 from context.client.ContextClient import ContextClient
 
+
 link = Blueprint('link', __name__, url_prefix='/link')
 context_client = ContextClient()
 
@@ -32,4 +34,13 @@ def home():
     return render_template(
         "link/home.html",
         links=response.links,
-    )
\ No newline at end of file
+    )
+
+@link.route('detail/<path:link_uuid>', methods=('GET', 'POST'))
+def detail(link_uuid: str):
+    request = LinkId()
+    request.link_uuid.uuid = link_uuid
+    context_client.connect()
+    response = context_client.GetLink(request)
+    context_client.close()
+    return render_template('link/detail.html',link=response)
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index 85d3aeeb7c6f23ab4123412173cdfda4d27b23a4..d115444487c7356a541f3c567e5dea183da73ade 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -12,53 +12,162 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging
+import copy, json, logging
+from typing import Optional
 from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
-from common.proto.context_pb2 import Context, Device, Empty, Link, Topology, ContextIdList
+from common.proto.context_pb2 import Connection, Context, Device, Empty, Link, Service, Slice, Topology, ContextIdList
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from slice.client.SliceClient import SliceClient
 from webui.service.main.forms import ContextForm, DescriptorForm
 
 main = Blueprint('main', __name__)
 
 context_client = ContextClient()
 device_client = DeviceClient()
+service_client = ServiceClient()
+slice_client = SliceClient()
 
 logger = logging.getLogger(__name__)
 
-def process_descriptor(item_name_singluar, item_name_plural, grpc_method, grpc_class, items):
+ENTITY_TO_TEXT = {
+    # name   => singular,    plural
+    'context'   : ('Context',    'Contexts'   ),
+    'topology'  : ('Topology',   'Topologies' ),
+    'device'    : ('Device',     'Devices'    ),
+    'link'      : ('Link',       'Links'      ),
+    'service'   : ('Service',    'Services'   ),
+    'slice'     : ('Slice',      'Slices'     ),
+    'connection': ('Connection', 'Connections'),
+}
+
+ACTION_TO_TEXT = {
+    # action =>  infinitive,  past
+    'add'     : ('Add',       'Added'),
+    'update'  : ('Update',    'Updated'),
+    'config'  : ('Configure', 'Configured'),
+}
+
+def process_descriptor(entity_name, action_name, grpc_method, grpc_class, entities):
+    entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name]
+    action_infinitive, action_past = ACTION_TO_TEXT[action_name]
     num_ok, num_err = 0, 0
-    for item in items:
+    for entity in entities:
         try:
-            grpc_method(grpc_class(**item))
+            grpc_method(grpc_class(**entity))
             num_ok += 1
         except Exception as e: # pylint: disable=broad-except
-            flash(f'Unable to add {item_name_singluar} {str(item)}: {str(e)}', 'error')
+            flash(f'Unable to {action_infinitive} {entity_name_singluar} {str(entity)}: {str(e)}', 'error')
             num_err += 1
-    if num_ok : flash(f'{str(num_ok)} {item_name_plural} added', 'success')
-    if num_err: flash(f'{str(num_err)} {item_name_plural} failed', 'danger')
+    if num_ok : flash(f'{str(num_ok)} {entity_name_plural} {action_past}', 'success')
+    if num_err: flash(f'{str(num_err)} {entity_name_plural} failed', 'danger')
 
 def process_descriptors(descriptors):
-    logger.warning(str(descriptors.data))
-    logger.warning(str(descriptors.name))
     try:
-        logger.warning(str(request.files))
         descriptors_file = request.files[descriptors.name]
-        logger.warning(str(descriptors_file))
         descriptors_data = descriptors_file.read()
-        logger.warning(str(descriptors_data))
         descriptors = json.loads(descriptors_data)
-        logger.warning(str(descriptors))
     except Exception as e: # pylint: disable=broad-except
         flash(f'Unable to load descriptor file: {str(e)}', 'danger')
         return
 
+    dummy_mode  = descriptors.get('dummy_mode' , False)
+    contexts    = descriptors.get('contexts'   , [])
+    topologies  = descriptors.get('topologies' , [])
+    devices     = descriptors.get('devices'    , [])
+    links       = descriptors.get('links'      , [])
+    services    = descriptors.get('services'   , [])
+    slices      = descriptors.get('slices'     , [])
+    connections = descriptors.get('connections', [])
+
+    if dummy_mode:
+        # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
+        context_client.connect()
+
+        contexts_add = copy.deepcopy(contexts)
+        for context in contexts_add:
+            context['topology_ids'] = []
+            context['service_ids'] = []
+
+        topologies_add = copy.deepcopy(topologies)
+        for topology in topologies_add:
+            topology['device_ids'] = []
+            topology['link_ids'] = []
+
+        process_descriptor('context',    'add',    context_client.SetContext,    Context,    contexts_add  )
+        process_descriptor('topology',   'add',    context_client.SetTopology,   Topology,   topologies_add)
+        process_descriptor('device',     'add',    context_client.SetDevice,     Device,     devices       )
+        process_descriptor('link',       'add',    context_client.SetLink,       Link,       links         )
+        process_descriptor('service',    'add',    context_client.SetService,    Service,    services      )
+        process_descriptor('context',    'update', context_client.SetContext,    Context,    contexts      )
+        process_descriptor('topology',   'update', context_client.SetTopology,   Topology,   topologies    )
+        process_descriptor('slice',      'add',    context_client.SetSlice,      Slice,      slices        )
+        process_descriptor('connection', 'add',    context_client.SetConnection, Connection, connections   )
+        context_client.close()
+        return
+
+    # Normal mode: follows the automated workflows in the different components
+
+    # in normal mode, connections should not be set
+    assert len(connections) == 0
+
+    devices_add = []
+    devices_config = []
+    for device in devices:
+        connect_rules = []
+        config_rules = []
+        for config_rule in device.get('device_config', {}).get('config_rules', []):
+            custom_resource_key : Optional[str] = config_rule.get('custom', {}).get('resource_key')
+            if custom_resource_key is not None and custom_resource_key.startswith('_connect/'):
+                connect_rules.append(config_rule)
+            else:
+                config_rules.append(config_rule)
+
+        if len(connect_rules) > 0:
+            device_add = copy.deepcopy(device)
+            device_add['device_endpoints'] = []
+            device_add['device_config'] = {'config_rules': connect_rules}
+            devices_add.append(device_add)
+
+        if len(config_rules) > 0:
+            device['device_config'] = {'config_rules': config_rules}
+            devices_config.append(device)
+
+    services_add = []
+    for service in services:
+        service_copy = copy.deepcopy(service)
+        service_copy['service_endpoint_ids'] = []
+        service_copy['service_constraints'] = []
+        service_copy['service_config'] = {'config_rules': []}
+        services_add.append(service_copy)
+
+    slices_add = []
+    for slice in slices:
+        slice_copy = copy.deepcopy(slice)
+        slice_copy['slice_endpoint_ids'] = []
+        slice_copy['slice_constraints'] = []
+        slice_copy['slice_config'] = {'config_rules': []}
+        slices_add.append(slice_copy)
+
     context_client.connect()
     device_client.connect()
-    process_descriptor('Context',  'Contexts',   context_client.SetContext,  Context,  descriptors['contexts'  ])
-    process_descriptor('Topology', 'Topologies', context_client.SetTopology, Topology, descriptors['topologies'])
-    process_descriptor('Device',   'Devices',    device_client .AddDevice,   Device,   descriptors['devices'   ])
-    process_descriptor('Link',     'Links',      context_client.SetLink,     Link,     descriptors['links'     ])
+    service_client.connect()
+    slice_client.connect()
+
+    process_descriptor('context',  'add',    context_client.SetContext,      Context,  contexts      )
+    process_descriptor('topology', 'add',    context_client.SetTopology,     Topology, topologies    )
+    process_descriptor('device',   'add',    device_client .AddDevice,       Device,   devices_add   )
+    process_descriptor('device',   'config', device_client .ConfigureDevice, Device,   devices_config)
+    process_descriptor('link',     'add',    context_client.SetLink,         Link,     links         )
+    process_descriptor('service',  'add',    service_client.CreateService,   Service,  services_add  )
+    process_descriptor('service',  'update', service_client.UpdateService,   Service,  services      )
+    process_descriptor('slice',    'add',    slice_client  .CreateSlice,     Slice,    slices_add    )
+    process_descriptor('slice',    'update', slice_client  .UpdateSlice,     Slice,    slices        )
+
+    slice_client.close()
+    service_client.close()
     device_client.close()
     context_client.close()
 
@@ -69,14 +178,18 @@ def home():
     response: ContextIdList = context_client.ListContextIds(Empty())
     context_form: ContextForm = ContextForm()
     context_form.context.choices.append(('', 'Select...'))
+
     for context in response.context_ids:
         context_form.context.choices.append((context.context_uuid.uuid, context.context_uuid))
+
     if context_form.validate_on_submit():
         session['context_uuid'] = context_form.context.data
         flash(f'The context was successfully set to `{context_form.context.data}`.', 'success')
         return redirect(url_for("main.home"))
+
     if 'context_uuid' in session:
         context_form.context.data = session['context_uuid']
+
     descriptor_form: DescriptorForm = DescriptorForm()
     try:
         if descriptor_form.validate_on_submit():
@@ -88,6 +201,7 @@ def home():
     finally:
         context_client.close()
         device_client.close()
+
     return render_template('main/home.html', context_form=context_form, descriptor_form=descriptor_form)
 
 @main.route('/topology', methods=['GET'])
@@ -102,11 +216,17 @@ def topology():
         } for device in response.devices]
 
         response = context_client.ListLinks(Empty())
-        links = [{
-            'id': link.link_id.link_uuid.uuid,
-            'source': link.link_endpoint_ids[0].device_id.device_uuid.uuid,
-            'target': link.link_endpoint_ids[1].device_id.device_uuid.uuid,
-        } for link in response.links]
+        links = []
+        for link in response.links:
+            if len(link.link_endpoint_ids) != 2:
+                str_link = grpc_message_to_json_string(link)
+                logger.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link))
+                continue
+            links.append({
+                'id': link.link_id.link_uuid.uuid,
+                'source': link.link_endpoint_ids[0].device_id.device_uuid.uuid,
+                'target': link.link_endpoint_ids[1].device_id.device_uuid.uuid,
+            })
 
         return jsonify({'devices': devices, 'links': links})
     except:
diff --git a/src/webui/service/service/routes.py b/src/webui/service/service/routes.py
index 81031490ef840ff63262444a5487932a4e72c111..bc05daee3e4ff8795c26bed9e0707b9a3ab2be7c 100644
--- a/src/webui/service/service/routes.py
+++ b/src/webui/service/service/routes.py
@@ -14,7 +14,7 @@
 
 import grpc
 from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for
-from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceList, ServiceTypeEnum, ServiceStatusEnum
+from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceList, ServiceTypeEnum, ServiceStatusEnum, Connection
 from context.client.ContextClient import ContextClient
 from service.client.ServiceClient import ServiceClient
 
@@ -73,12 +73,14 @@ def detail(service_uuid: str):
     try:
         context_client.connect()
         response: Service = context_client.GetService(request)
+        connections: Connection = context_client.ListConnections(request)
         context_client.close()
     except Exception as e:
         flash('The system encountered an error and cannot show the details of this service.', 'warning')
         current_app.logger.exception(e)
         return redirect(url_for('service.home'))
-    return render_template('service/detail.html', service=response)
+    return render_template('service/detail.html', service=response, connections=connections,ste=ServiceTypeEnum,
+                                                sse=ServiceStatusEnum)
 
 
 @service.get('<path:service_uuid>/delete')
@@ -100,4 +102,4 @@ def delete(service_uuid: str):
     except Exception as e:
         flash('Problem deleting service "{:s}": {:s}'.format(service_uuid, str(e.details())), 'danger')
         current_app.logger.exception(e)
-    return redirect(url_for('service.home'))
+    return redirect(url_for('service.home'))
\ No newline at end of file
diff --git a/src/webui/service/slice/__init__.py b/src/webui/service/slice/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/webui/service/slice/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/webui/service/slice/routes.py b/src/webui/service/slice/routes.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5287501362db88edaf334426ca6e6d0e3331ef2
--- /dev/null
+++ b/src/webui/service/slice/routes.py
@@ -0,0 +1,103 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import grpc
+from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for
+from common.proto.context_pb2 import ContextId, Slice, SliceId, SliceList, Connection, SliceStatusEnum
+from context.client.ContextClient import ContextClient
+#from slice.client.SliceClient import SliceClient
+
+
+
+slice = Blueprint('slice', __name__, url_prefix='/slice')
+
+context_client = ContextClient()
+#slice_client = SliceClient()
+
+@slice.get('/')
+def home():
+    # flash('This is an info message', 'info')
+    # flash('This is a danger message', 'danger')
+
+    context_uuid = session.get('context_uuid', '-')
+    if context_uuid == "-":
+        flash("Please select a context!", "warning")
+        return redirect(url_for("main.home"))
+    request = ContextId()
+    request.context_uuid.uuid = context_uuid
+    context_client.connect()
+    try:
+        slice_list = context_client.ListSlices(request)
+        # print(slice_list)
+        slices = slice_list.slices
+        context_not_found = False
+    except grpc.RpcError as e:
+        if e.code() != grpc.StatusCode.NOT_FOUND: raise
+        if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
+        slices = []
+        context_not_found = True
+    context_client.close()
+    return render_template('slice/home.html',slices=slices, context_not_found=context_not_found, sse=SliceStatusEnum)
+
+#
+#@slice.route('add', methods=['GET', 'POST'])
+#def add():
+#    flash('Add slice route called', 'danger')
+#    raise NotImplementedError()
+#    return render_template('slice/home.html')
+#
+#
+@slice.get('<path:slice_uuid>/detail')
+def detail(slice_uuid: str):
+    context_uuid = session.get('context_uuid', '-')
+    if context_uuid == "-":
+        flash("Please select a context!", "warning")
+        return redirect(url_for("main.home"))
+    
+    request: SliceId = SliceId()
+    request.slice_uuid.uuid = slice_uuid
+    request.context_id.context_uuid.uuid = context_uuid
+    req = ContextId()
+    req.context_uuid.uuid = context_uuid
+    try:
+        context_client.connect()
+        response: Slice = context_client.GetSlice(request)
+        services = context_client.ListServices(req)
+        context_client.close()
+    except Exception as e:
+        flash('The system encountered an error and cannot show the details of this slice.', 'warning')
+        current_app.logger.exception(e)
+        return redirect(url_for('slice.home'))
+    return render_template('slice/detail.html', slice=response, sse=SliceStatusEnum, services=services)
+#
+#@slice.get('<path:slice_uuid>/delete')
+#def delete(slice_uuid: str):
+#    context_uuid = session.get('context_uuid', '-')
+#    if context_uuid == "-":
+#        flash("Please select a context!", "warning")
+#        return redirect(url_for("main.home"))
+#
+#    try:
+#        request = SliceId()
+#        request.slice_uuid.uuid = slice_uuid
+#        request.context_id.context_uuid.uuid = context_uuid
+#        slice_client.connect()
+#        response = slice_client.DeleteSlice(request)
+#        slice_client.close()
+#
+#        flash('Slice "{:s}" deleted successfully!'.format(slice_uuid), 'success')
+#    except Exception as e:
+#        flash('Problem deleting slice "{:s}": {:s}'.format(slice_uuid, str(e.details())), 'danger')
+#        current_app.logger.exception(e) 
+#    return redirect(url_for('slice.home'))
\ No newline at end of file
diff --git a/src/webui/service/static/TeraFlow SDN Logo ScreenColour with Slogan.png b/src/webui/service/static/TeraFlow SDN Logo ScreenColour with Slogan.png
new file mode 100644
index 0000000000000000000000000000000000000000..218cc713c0a2704f96371fdd2916ef16b44cf667
Binary files /dev/null and b/src/webui/service/static/TeraFlow SDN Logo ScreenColour with Slogan.png differ
diff --git a/src/webui/service/static/topology_icons/datacenter.png b/src/webui/service/static/topology_icons/datacenter.png
new file mode 100644
index 0000000000000000000000000000000000000000..33818cf87e0f47fb6fd45b45c46f368f62ab78d2
Binary files /dev/null and b/src/webui/service/static/topology_icons/datacenter.png differ
diff --git a/src/webui/service/static/topology_icons/emu-datacenter.png b/src/webui/service/static/topology_icons/emu-datacenter.png
new file mode 100644
index 0000000000000000000000000000000000000000..ed2cc7376b481815edb48fb6faaa025289cfc3ca
Binary files /dev/null and b/src/webui/service/static/topology_icons/emu-datacenter.png differ
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index d314acb3d5cbe607e82474be7e66302f3d620d6a..5d7801d11880e89869120985307c6b43416f5a05 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -1,160 +1,167 @@
-<!doctype html>
-<!--
- Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
 
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<html lang="en">
-  <head>
-    <!-- Required meta tags -->
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-
-    <link rel="shortcut icon" href="https://teraflow-h2020.eu/sites/teraflow/files/public/favicon.png" type="image/png" />
-
-    <!-- Bootstrap CSS -->
-    <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-uWxY/CJNBR+1zjPWmfnSnVxwRheevXITnMqoEIeG1LJrdI0GlVs/9cVSyPYXdcSF" crossorigin="anonymous">
-    <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.5.0/font/bootstrap-icons.css">
-
-    <title>TeraFlow OFC 2022 Demo</title>
-  </head>
-  <body>
-      <div id="teraflow-branding" style="width: 260px; margin: 7px;">
-        <a href="{{ url_for('main.home') }}" title="Home" rel="home" id="main-logo" class="site-logo site-logo-pages">
-            <svg id="Capa_1" data-name="Capa 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 436.3 132.1"><defs><style>.cls-1{fill:#36a9e1;}.cls-2{fill:#1d71b8;}.cls-3{fill:none;stroke-width:2.52px;}.cls-10,.cls-3,.cls-4,.cls-5,.cls-7,.cls-8,.cls-9{stroke:#0f77b6;}.cls-3,.cls-4,.cls-8{stroke-miterlimit:10;}.cls-10,.cls-4,.cls-5,.cls-7,.cls-8,.cls-9{fill:#fff;}.cls-4{stroke-width:0.73px;}.cls-5,.cls-7{stroke-miterlimit:10;}.cls-5{stroke-width:0.75px;}.cls-6{fill:#0f77b6;}.cls-7{stroke-width:0.72px;}.cls-8{stroke-width:0.7px;}.cls-9{stroke-miterlimit:10;stroke-width:0.69px;}.cls-10{stroke-miterlimit:10;stroke-width:0.7px;}</style></defs><path class="cls-1" d="M96,57V51.3h44.1V57H121v52.3h-5.9V57Z"></path><path class="cls-1" d="M168.9,95.1l4.7,2.4a26,26,0,0,1-5.3,7.3,22.27,22.27,0,0,1-6.7,4.2,22.64,22.64,0,0,1-8.5,1.4c-7,0-12.5-2.3-16.4-6.9a23.53,23.53,0,0,1-5.9-15.6,23,23,0,0,1,5-14.5c4.2-5.4,9.9-8.1,17-8.1,7.3,0,13.2,2.8,17.5,8.3,3.1,3.9,4.7,8.8,4.7,14.7H136.4a17.48,17.48,0,0,0,4.8,12.3,15.26,15.26,0,0,0,11.4,4.8,20,20,0,0,0,6.4-1.1,19.3,19.3,0,0,0,5.3-3A33.07,33.07,0,0,0,168.9,95.1Zm0-11.6a18.66,18.66,0,0,0-3.2-7.1,15.25,15.25,0,0,0-5.6-4.3,16.87,16.87,0,0,0-7.3-1.6,16.06,16.06,0,0,0-10.9,4.1,18.15,18.15,0,0,0-5,8.9Z"></path><path class="cls-1" d="M182,66.4h5.6v6.3a20,20,0,0,1,5.3-5.5,10.67,10.67,0,0,1,5.8-1.8,9.87,9.87,0,0,1,4.9,1.5l-2.9,4.7a7.52,7.52,0,0,0-2.9-.7,8.09,8.09,0,0,0-5.3,2.3,14.64,14.64,0,0,0-3.9,7c-.7,2.4-1,7.4-1,14.8v14.5H182Z"></path><path class="cls-1" d="M246.2,66.4v42.9h-5.4V102a23.11,23.11,0,0,1-7.8,6.3,21.23,21.23,0,0,1-9.4,2.1,21,21,0,0,1-15.6-6.6,23.07,23.07,0,0,1,.1-32,21.23,21.23,0,0,1,15.7-6.6,20,20,0,0,1,17.1,8.9V66.2h5.3Zm-22.1,4.2a16.67,16.67,0,0,0-8.5,2.3,15.93,15.93,0,0,0-6.2,6.4,17.68,17.68,0,0,0-2.3,8.7,18.26,18.26,0,0,0,2.3,8.7,15.93,15.93,0,0,0,6.2,6.4,16.58,16.58,0,0,0,8.4,2.3,17.59,17.59,0,0,0,8.6-2.3,15.42,15.42,0,0,0,6.2-6.2,17.17,17.17,0,0,0,2.2-8.8,16.73,16.73,0,0,0-4.9-12.4A15.8,15.8,0,0,0,224.1,70.6Z"></path><path class="cls-2" d="M259.5,51.3h29.1V57H265.3V75.2h23.3v5.7H265.3v28.5h-5.8V51.3Z"></path><path class="cls-2" d="M296.9,49.9h5.5v59.5h-5.5Z"></path><path class="cls-2" d="M330.5,65.3a21.1,21.1,0,0,1,16.4,7.2A22.55,22.55,0,0,1,352.8,88a22.24,22.24,0,0,1-6.3,15.7c-4.2,4.5-9.5,6.7-16.1,6.7s-12-2.2-16.1-6.7A22.24,22.24,0,0,1,308,88a22.73,22.73,0,0,1,5.9-15.5A21.81,21.81,0,0,1,330.5,65.3Zm0,5.4a15.83,15.83,0,0,0-11.8,5.1,17,17,0,0,0-4.9,12.3,17.68,17.68,0,0,0,2.3,8.7,15.19,15.19,0,0,0,6.1,6.2,16.48,16.48,0,0,0,8.4,2.2A16,16,0,0,0,339,103a15.82,15.82,0,0,0,6.1-6.2,17.68,17.68,0,0,0,2.3-8.7,17.07,17.07,0,0,0-5-12.3A16.2,16.2,0,0,0,330.5,70.7Z"></path><path class="cls-2" d="M351.2,66.4h5.7L370,97.6l13.7-31.1h1l13.8,31.1,13.4-31.1h5.7L399,109.3h-1L384.3,78.6l-13.7,30.7h-1Z"></path><polyline class="cls-3" points="51 105 51 41.2 27 41.2"></polyline><polyline class="cls-3" points="38.1 33.8 56.4 33.8 56.4 93"></polyline><polyline class="cls-3" points="79.9 33.8 61.5 33.8 61.5 79.2"></polyline><polyline class="cls-3" points="90.7 41.2 66.7 41.2 66.7 105"></polyline><line class="cls-3" x1="83.1" y1="62.6" x2="66.7" y2="62.6"></line><circle class="cls-4" cx="27" cy="41.2" r="5.3"></circle><path class="cls-1" d="M23.3,41.2a3.8,3.8,0,1,0,3.8-3.8A3.8,3.8,0,0,0,23.3,41.2Z"></path><circle class="cls-5" cx="51" cy="105" r="5.4"></circle><path class="cls-1" d="M47.3,105a3.8,3.8,0,1,0,3.8-3.8A3.8,3.8,0,0,0,47.3,105Z"></path><circle class="cls-6" cx="56.36" cy="93.02" r="3.4"></circle><circle class="cls-6" cx="61.5" cy="79.2" r="2.8"></circle><circle class="cls-7" cx="66.7" cy="105.01" r="5.3"></circle><path class="cls-1" d="M63,105a3.8,3.8,0,1,0,3.8-3.8A3.8,3.8,0,0,0,63,105Z"></path><circle class="cls-8" cx="90.7" cy="41.2" r="5.1"></circle><path class="cls-1" d="M87,41.2a3.8,3.8,0,1,0,3.8-3.8A3.8,3.8,0,0,0,87,41.2Z"></path><circle class="cls-8" cx="84.7" cy="62.6" r="5.1"></circle><path class="cls-1" d="M81,62.6a3.8,3.8,0,1,0,3.8-3.8A3.8,3.8,0,0,0,81,62.6Z"></path><line class="cls-3" x1="34.8" y1="62.6" x2="51.1" y2="62.6"></line><circle class="cls-8" cx="33.1" cy="62.6" r="5.1"></circle><path class="cls-1" d="M36.9,62.6a3.8,3.8,0,1,1-3.8-3.8A3.8,3.8,0,0,1,36.9,62.6Z"></path><line class="cls-3" x1="23.7" y1="26.7" x2="94.1" y2="26.7"></line><circle class="cls-9" cx="94.09" cy="26.67" r="5"></circle><path class="cls-1" d="M90.3,26.7a3.8,3.8,0,1,0,3.8-3.8A3.8,3.8,0,0,0,90.3,26.7Z"></path><circle class="cls-6" cx="78" cy="33.8" r="3.8"></circle><circle class="cls-6" cx="40" cy="33.8" r="3.8"></circle><circle class="cls-10" cx="23.71" cy="26.71" r="5.1"></circle><path class="cls-1" d="M20,26.7a3.8,3.8,0,1,0,3.8-3.8A3.8,3.8,0,0,0,20,26.7Z"></path></svg>
-          </a>
-      </div>
-
-    <nav class="navbar navbar-expand-lg navbar-dark bg-primary" style="margin-bottom: 10px;">
-        <div class="container-fluid">
-          <a class="navbar-brand" href="{{ url_for('main.home') }}">
-            <img src="https://teraflow-h2020.eu/sites/teraflow/files/public/favicon.png" alt="" width="30" height="24" class="d-inline-block align-text-top"/>
-            TeraFlow
+  <!doctype html>
+  <!--
+   Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+  
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+  
+        http://www.apache.org/licenses/LICENSE-2.0
+  
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+  -->
+  
+  <html lang="en">
+    <head>
+      <!-- Required meta tags -->
+      <meta charset="utf-8">
+      <meta name="viewport" content="width=device-width, initial-scale=1">
+  
+      <link rel="shortcut icon" href="https://tfs.etsi.org/images/logos/tfs_logo_small.png" type="image/png" />
+  
+      <!-- Bootstrap CSS -->
+      <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-uWxY/CJNBR+1zjPWmfnSnVxwRheevXITnMqoEIeG1LJrdI0GlVs/9cVSyPYXdcSF" crossorigin="anonymous">
+      <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.5.0/font/bootstrap-icons.css">
+  
+      <title>ETSI TeraFlowSDN Controller</title>
+    </head>
+    <body>
+        <div id="teraflow-branding">
+          <a href="{{ url_for('main.home') }}" title="Home" rel="home" id="main-logo" class="site-logo site-logo-pages">
+            <img src="{{ url_for('static', filename='TeraFlow SDN Logo ScreenColour with Slogan.png') }}" width="400" type="image/png"> 
           </a>
-          <button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarColor02" aria-controls="navbarColor02" aria-expanded="false" aria-label="Toggle navigation">
-            <span class="navbar-toggler-icon"></span>
-          </button>
-          <div class="collapse navbar-collapse" id="navbarColor02">
-            <ul class="navbar-nav me-auto mb-2 mb-lg-0">
-              <li class="nav-item">
-                {% if request.path == '/' %}
-                <a class="nav-link active" aria-current="page" href="{{ url_for('main.home') }}">Home</a>
-                {% else %}
-                <a class="nav-link" href="{{ url_for('main.home') }}">Home</a>
-                {% endif %}
-              </li>
-              <li class="nav-item">
-                {% if '/device/' in request.path %}
-                <a class="nav-link active" aria-current="page" href="{{ url_for('device.home') }}">Device</a>
-                {% else %}
-                <a class="nav-link" href="{{ url_for('device.home') }}">Device</a>
-                {% endif %}
-              </li>
-              <li class="nav-item">
-                {% if '/link/' in request.path %}
-                <a class="nav-link active" aria-current="page" href="{{ url_for('link.home') }}">Link</a>
-                {% else %}
-                <a class="nav-link" href="{{ url_for('link.home') }}">Link</a>
-                {% endif %}
-              </li>
-              <li class="nav-item">
-                {% if '/service/' in request.path %}
-                <a class="nav-link active" aria-current="page" href="{{ url_for('service.home') }}">Service</a>
-                {% else %}
-                <a class="nav-link" href="{{ url_for('service.home') }}">Service</a>
-                {% endif %}
-              </li>
-
-              <li class="nav-item">
-                <a class="nav-link" href="/grafana" id="grafana_link" target="grafana">Grafana</a>
-              </li>
-
-              <li class="nav-item">
-                <a class="nav-link" href="{{ url_for('main.debug') }}">Debug</a>
-              </li>
-
-              <!-- <li class="nav-item">
-                <a class="nav-link" href="#">Context</a>
-              </li>
-              
-              <li class="nav-item">
-                <a class="nav-link" href="#">Monitoring</a>
-              </li> -->
-              <li class="nav-item">
-                <a class="nav-link" href="{{ url_for('main.about') }}">About</a>
-              </li>
-            </ul>
-            <span class="navbar-text" style="color: #fff;">
-              Current context: <b>{{ get_working_context() }}</b>
-            </span>
-          </div>
         </div>
-      </nav>
-
-      <main class="container">
-        <div class="row">
-          <div class="col-md-12">
-            {% with messages = get_flashed_messages(with_categories=true) %}
-              {% if messages %}
-                {% for category, message in messages %}
-                  <div class="alert alert-{{ category }} alert-dismissible fade show" role="alert">
-                    {{ message }}
-                    <button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button>
-                  </div>
-      
-                {% endfor %}
-              {% endif %}
-            {% endwith %}
-          </div>
-        </div>
-        <div class="row">
-          <div class="col-xxl-12">
-          {% block content %}{% endblock %}
+  
+      <nav class="navbar navbar-expand-lg navbar-dark bg-primary" style="margin-bottom: 10px;">
+          <div class="container-fluid">
+            <a class="navbar-brand" href="{{ url_for('main.home') }}">
+              <img src="https://teraflow-h2020.eu/sites/teraflow/files/public/favicon.png" alt="" width="30" height="24" class="d-inline-block align-text-top"/>
+              TeraFlow
+            </a>
+            <button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarColor02" aria-controls="navbarColor02" aria-expanded="false" aria-label="Toggle navigation">
+              <span class="navbar-toggler-icon"></span>
+            </button>
+            <div class="collapse navbar-collapse" id="navbarColor02">
+              <ul class="navbar-nav me-auto mb-2 mb-lg-0">
+                <li class="nav-item">
+                  {% if request.path == '/' %}
+                  <a class="nav-link active" aria-current="page" href="{{ url_for('main.home') }}">Home</a>
+                  {% else %}
+                  <a class="nav-link" href="{{ url_for('main.home') }}">Home</a>
+                  {% endif %}
+                </li>
+                <li class="nav-item">
+                  {% if '/device/' in request.path %}
+                  <a class="nav-link active" aria-current="page" href="{{ url_for('device.home') }}">Device</a>
+                  {% else %}
+                  <a class="nav-link" href="{{ url_for('device.home') }}">Device</a>
+                  {% endif %}
+                </li>
+                <li class="nav-item">
+                  {% if '/link/' in request.path %}
+                  <a class="nav-link active" aria-current="page" href="{{ url_for('link.home') }}">Link</a>
+                  {% else %}
+                  <a class="nav-link" href="{{ url_for('link.home') }}">Link</a>
+                  {% endif %}
+                </li>
+                <li class="nav-item">
+                  {% if '/service/' in request.path %}
+                  <a class="nav-link active" aria-current="page" href="{{ url_for('service.home') }}">Service</a>
+                  {% else %}
+                  <a class="nav-link" href="{{ url_for('service.home') }}">Service</a>
+                  {% endif %}
+                </li>
+                <li class="nav-item">
+                  {% if '/slice/' in request.path %}
+                  <a class="nav-link active" aria-current="page" href="{{ url_for('slice.home') }}">Slice</a>
+                  {% else %}
+                  <a class="nav-link" href="{{ url_for('slice.home') }}">Slice</a>
+                  {% endif %}
+                </li>
+                <li class="nav-item">
+                  <a class="nav-link" href="/grafana" id="grafana_link" target="grafana">Grafana</a>
+                </li>
+  
+                <li class="nav-item">
+                  <a class="nav-link" href="{{ url_for('main.debug') }}">Debug</a>
+                </li>
+  
+                <!-- <li class="nav-item">
+                  <a class="nav-link" href="#">Context</a>
+                </li>
+                
+                <li class="nav-item">
+                  <a class="nav-link" href="#">Monitoring</a>
+                </li> -->
+                <li class="nav-item">
+                  <a class="nav-link" href="{{ url_for('main.about') }}">About</a>
+                </li>
+              </ul>
+              <span class="navbar-text" style="color: #fff;">
+                Current context: <b>{{ get_working_context() }}</b>
+              </span>
+            </div>
           </div>
-        </div>
-      </main>
-
-      <footer class="footer" style="background-color: darkgrey; margin-top: 30px; padding-top: 20px;">
-        <div class="container">
+        </nav>
+  
+        <main class="container">
           <div class="row">
             <div class="col-md-12">
-              <p class="text-center" style="color: white;">&copy; 2021-2023</p>
+              {% with messages = get_flashed_messages(with_categories=true) %}
+                {% if messages %}
+                  {% for category, message in messages %}
+                    <div class="alert alert-{{ category }} alert-dismissible fade show" role="alert">
+                      {{ message }}
+                      <button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button>
+                    </div>
+        
+                  {% endfor %}
+                {% endif %}
+              {% endwith %}
             </div>
           </div>
           <div class="row">
-            <div class="col-md-6">
-              <p>This project has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No 101015857.</p>
+            <div class="col-xxl-12">
+            {% block content %}{% endblock %}
+            </div>
+          </div>
+        </main>
+  
+        <footer class="footer" style="background-color: darkgrey; margin-top: 30px; padding-top: 20px;">
+          <div class="container">
+            <div class="row">
+              <div class="col-md-12">
+                <p class="text-center" style="color: white;">&copy; 2021-2023</p>
+              </div>
             </div>
-            <div class="col-md-6">
-              <img src="https://teraflow-h2020.eu/sites/teraflow/files/public/content-images/media/2021/logo%205G-ppp%20eu.png" width="310" alt="5g ppp EU logo" loading="lazy" typeof="foaf:Image">
+            <div class="row">
+              <div class="col-md-6">
+                <p>This project has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No 101015857.</p>
+              </div>
+              <div class="col-md-6">
+                <img src="https://teraflow-h2020.eu/sites/teraflow/files/public/content-images/media/2021/logo%205G-ppp%20eu.png" width="310" alt="5g ppp EU logo" loading="lazy" typeof="foaf:Image">
+              </div>
             </div>
           </div>
-        </div>
-      </footer>
-
-    <!-- Optional JavaScript; choose one of the two! -->
-
-    <!-- Option 1: Bootstrap Bundle with Popper -->
-    <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.2/dist/js/bootstrap.bundle.min.js" integrity="sha384-kQtW33rZJAHjgefvhyyzcGF3C5TFyBQBA13V1RKPf4uH+bwyzQxZ6CmMZHmNBEfJ" crossorigin="anonymous"></script>
-    <!-- <script src="{{ url_for('static', filename='site.js') }}"/> -->
-    <!-- <script>
-      document.getElementById("grafana_link").href = window.location.protocol + "//" + window.location.hostname + ":30300"
-    </script> -->
-    <!-- Option 2: Separate Popper and Bootstrap JS -->
-    <!--
-    <script src="https://cdn.jsdelivr.net/npm/@popperjs/core@2.10.2/dist/umd/popper.min.js" integrity="sha384-7+zCNj/IqJ95wo16oMtfsKbZ9ccEh31eOz1HGyDuCQ6wgnyJNSYdrPa03rtR1zdB" crossorigin="anonymous"></script>
-    <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.2/dist/js/bootstrap.min.js" integrity="sha384-PsUw7Xwds7x08Ew3exXhqzbhuEYmA2xnwc8BuD6SEr+UmEHlX8/MCltYEodzWA4u" crossorigin="anonymous"></script>
-    -->
-  </body>
-</html>
\ No newline at end of file
+        </footer>
+  
+      <!-- Optional JavaScript; choose one of the two! -->
+  
+      <!-- Option 1: Bootstrap Bundle with Popper -->
+      <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.2/dist/js/bootstrap.bundle.min.js" integrity="sha384-kQtW33rZJAHjgefvhyyzcGF3C5TFyBQBA13V1RKPf4uH+bwyzQxZ6CmMZHmNBEfJ" crossorigin="anonymous"></script>
+      <!-- <script src="{{ url_for('static', filename='site.js') }}"/> -->
+      <!-- <script>
+        document.getElementById("grafana_link").href = window.location.protocol + "//" + window.location.hostname + ":30300"
+      </script> -->
+      <!-- Option 2: Separate Popper and Bootstrap JS -->
+      <!--
+      <script src="https://cdn.jsdelivr.net/npm/@popperjs/core@2.10.2/dist/umd/popper.min.js" integrity="sha384-7+zCNj/IqJ95wo16oMtfsKbZ9ccEh31eOz1HGyDuCQ6wgnyJNSYdrPa03rtR1zdB" crossorigin="anonymous"></script>
+      <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.2/dist/js/bootstrap.min.js" integrity="sha384-PsUw7Xwds7x08Ew3exXhqzbhuEYmA2xnwc8BuD6SEr+UmEHlX8/MCltYEodzWA4u" crossorigin="anonymous"></script>
+      -->
+    </body>
+  </html>
\ No newline at end of file
diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html
index b4cf6b715250d3e96b5026c3e19758a2be9a9607..69ca93727310db7f89034f56510ceb5df504083f 100644
--- a/src/webui/service/templates/device/detail.html
+++ b/src/webui/service/templates/device/detail.html
@@ -1,111 +1,130 @@
 <!--
- Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+    Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+   
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+   
+         http://www.apache.org/licenses/LICENSE-2.0
+   
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+   -->
+   
+   {% extends 'base.html' %}
+   
+   {% block content %}
+       <h1>Device {{ device.device_id.device_uuid.uuid }}</h1>
+   
+       <div class="row mb-3">
+           <div class="col-sm-3">
+               <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('device.home') }}'">
+                   <i class="bi bi-box-arrow-in-left"></i>
+                   Back to device list
+               </button>
+           </div>
+           <div class="col-sm-3">
+               <a id="update" class="btn btn-secondary" href="#">
+                   <i class="bi bi-pencil-square"></i>
+                   Update
+               </a>
+           </div>
+           <div class="col-sm-3">
+               <!-- <button type="button" class="btn btn-danger"><i class="bi bi-x-square"></i>Delete device</button> -->
+               <button type="button" class="btn btn-danger" data-bs-toggle="modal" data-bs-target="#deleteModal">
+                   <i class="bi bi-x-square"></i>Delete device
+                 </button>
+           </div>
+       </div>
 
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-{% extends 'base.html' %}
-
-{% block content %}
-    <h1>Device {{ device.device_id.device_uuid.uuid }}</h1>
-
-    <div class="row mb-3">
-        <div class="col-sm-3">
-            <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('device.home') }}'">
-                <i class="bi bi-box-arrow-in-left"></i>
-                Back to device list
-            </button>
+       <br>
+       <div class="row mb-3">
+            <div class="col-sm-4">
+                <b>UUID: </b>{{ device.device_id.device_uuid.uuid }}<br><br>
+                <b>Type: </b>{{ device.device_type }}<br><br>
+                <b>Status: </b> {{ dose.Name(device.device_operational_status).replace('DEVICEOPERATIONALSTATUS_', '') }}<br>
+                <b>Drivers: </b>
+                <ul>
+                    {% for driver in device.device_drivers %}
+                    <li>{{ dde.Name(driver).replace('DEVICEDRIVER_', '').replace('UNDEFINED', 'EMULATED') }}</li>
+                    {% endfor %}
+                </ul>
+            </div>
+            <div class="col-sm-8">
+                    <table class="table table-striped table-hover">
+                        <thead>
+                            <tr>
+                                <th scope="col">Endpoints</th>
+                                <th scope="col">Type</th>
+                            </tr>
+                        </thead>
+                        <tbody>
+                            {% for endpoint in device.device_endpoints %}
+                            <tr>
+                                <td>
+                                    {{ endpoint.endpoint_id.endpoint_uuid.uuid }}
+                                </td>
+                                <td>
+                                    {{ endpoint.endpoint_type }}
+                                </td>
+                            </tr>
+                            {% endfor %}
+                        </tbody>
+                    </table>
+                </div> 
+            </div>
         </div>
-        <div class="col-sm-3">
-            <a id="update" class="btn btn-secondary" href="#">
-                <i class="bi bi-pencil-square"></i>
-                Update
-            </a>
-        </div>
-        <div class="col-sm-3">
-            <!-- <button type="button" class="btn btn-danger"><i class="bi bi-x-square"></i>Delete device</button> -->
-            <button type="button" class="btn btn-danger" data-bs-toggle="modal" data-bs-target="#deleteModal">
-                <i class="bi bi-x-square"></i>Delete device
-              </button>
-        </div>
-    </div>
 
-    <div class="row mb-3">
-        <div class="col-sm-1"><b>UUID:</b></div>
-        <div class="col-sm-5">
-            {{ device.device_id.device_uuid.uuid }}
-        </div>
-        <div class="col-sm-1"><b>Type:</b></div>
-        <div class="col-sm-5">
-            {{ device.device_type }}
-        </div>
-    </div>
-    <div class="row mb-3">
-        <div class="col-sm-1"><b>Drivers:</b></div>
-        <div class="col-sm-11">
-            <ul>
-                {% for driver in device.device_drivers %}
-                <li>{{ dde.Name(driver).replace('DEVICEDRIVER_', '').replace('UNDEFINED', 'EMULATED') }}</li>
-                {% endfor %}
-            </ul>
-        </div>
-    </div>
-    <div class="row mb-3">
-        <b>Endpoints:</b>
-        <div class="col-sm-10">
-            <ul>
-            {% for endpoint in device.device_endpoints %}
-                <li>{{ endpoint.endpoint_id.endpoint_uuid.uuid }}: {{ endpoint.endpoint_type }}</li>
-            {% endfor %}
-            </ul>
-        </div>
-    </div>
-    <div class="row mb-3">
         <b>Configurations:</b>
-        <div class="col-sm-10">
-            <ul>
-            {% for config in device.device_config.config_rules %}
+        <table class="table table-striped table-hover">
+            <thead>
+                <tr>
+                    <th scope="col">Key</th>
+                    <th scope="col">Value</th>
+                </tr>
+            </thead>
+            <tbody>
+                {% for config in device.device_config.config_rules %}
                 {% if config.WhichOneof('config_rule') == 'custom' %}
-                <li>{{ config.custom.resource_key }}:
-                    <ul>
-                        {% for key, value in (config.custom.resource_value | from_json).items() %}
-                        <li><b>{{ key }}:</b> {{ value }}</li>
-                        {% endfor %}
-                    </ul>
-                </li>
+                <tr>
+                    <td>
+                        {{ config.custom.resource_key }}
+                    </td>
+                    <td>
+                        <ul>
+                            {% for key, value in (config.custom.resource_value | from_json).items() %}
+                            <li><b>{{ key }}:</b> {{ value }}</li>
+                            {% endfor %}
+                        </ul>
+                    </td>
+                </tr>
                 {% endif %}
-            {% endfor %}
-            </ul>
-        </div>
-    </div>
+                {% endfor %}
+            </tbody>
+        </table>
 
-    <!-- Modal -->
-<div class="modal fade" id="deleteModal" data-bs-backdrop="static" data-bs-keyboard="false" tabindex="-1" aria-labelledby="staticBackdropLabel" aria-hidden="true">
-    <div class="modal-dialog">
-      <div class="modal-content">
-        <div class="modal-header">
-          <h5 class="modal-title" id="staticBackdropLabel">Delete device?</h5>
-          <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
-        </div>
-        <div class="modal-body">
-          Are you sure you want to delete the device "{{ device.device_id.device_uuid.uuid }}"?
-        </div>
-        <div class="modal-footer">
-          <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">No</button>
-          <a type="button" class="btn btn-danger" href="{{ url_for('device.delete', device_uuid=device.device_id.device_uuid.uuid) }}"><i class="bi bi-exclamation-diamond"></i>Yes</a>
-        </div>
-      </div>
-    </div>
-  </div>
 
-{% endblock %}
\ No newline at end of file
+       <!-- Modal -->
+   <div class="modal fade" id="deleteModal" data-bs-backdrop="static" data-bs-keyboard="false" tabindex="-1" aria-labelledby="staticBackdropLabel" aria-hidden="true">
+       <div class="modal-dialog">
+         <div class="modal-content">
+           <div class="modal-header">
+             <h5 class="modal-title" id="staticBackdropLabel">Delete device?</h5>
+             <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
+           </div>
+           <div class="modal-body">
+             Are you sure you want to delete the device "{{ device.device_id.device_uuid.uuid }}"?
+           </div>
+           <div class="modal-footer">
+             <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">No</button>
+             <a type="button" class="btn btn-danger" href="{{ url_for('device.delete', device_uuid=device.device_id.device_uuid.uuid) }}"><i class="bi bi-exclamation-diamond"></i>Yes</a>
+           </div>
+         </div>
+       </div>
+     </div>
+   
+   {% endblock %}
+   
\ No newline at end of file
diff --git a/src/webui/service/templates/js/topology.js b/src/webui/service/templates/js/topology.js
index 05216fb98808d5b574d613344c63a7e19cb2c472..69de0445dac24bf2f7f16ec21da4a6d35133e9da 100644
--- a/src/webui/service/templates/js/topology.js
+++ b/src/webui/service/templates/js/topology.js
@@ -15,6 +15,15 @@
 // Based on:
 //   https://www.d3-graph-gallery.com/graph/network_basic.html
 //   https://bl.ocks.org/steveharoz/8c3e2524079a8c440df60c1ab72b5d03
+//   https://www.d3indepth.com/zoom-and-pan/
+
+// Pan & Zoom does not work; to be reviewed
+//<button onclick="zoomIn()">Zoom in</button>
+//<button onclick="zoomOut()">Zoom out</button>
+//<button onclick="resetZoom()">Reset zoom</button>
+//<button onclick="panLeft()">Pan left</button>
+//<button onclick="panRight()">Pan right</button>
+//<button onclick="center()">Center</button>
 
 // set the dimensions and margins of the graph
 const margin = {top: 5, right: 5, bottom: 5, left: 5};
@@ -22,16 +31,24 @@ const margin = {top: 5, right: 5, bottom: 5, left: 5};
 const icon_width  = 40;
 const icon_height = 40;
 
-width = 800 - margin.left - margin.right;
-height = 500 - margin.top - margin.bottom;
+width = 1000 - margin.left - margin.right;
+height = 600 - margin.top - margin.bottom;
+
+//function handleZoom(e) {
+//    console.dir(e);
+//    d3.select('svg g').attr('transform', e.transform);
+//}
+//let zoom = d3.zoom().scaleExtent([0.01, 10]).translateExtent([[0, 0], [width, height]]).on('zoom', handleZoom);
 
 // append the svg object to the body of the page
 const svg = d3.select('#topology')
     .append('svg')
         .attr('width', width + margin.left + margin.right)
         .attr('height', height + margin.top + margin.bottom)
+        //.call(zoom)
     .append('g')
-        .attr('transform', `translate(${margin.left}, ${margin.top})`);
+        .attr('transform', `translate(${margin.left}, ${margin.top})`)
+        ;
 
 // svg objects
 var link, node;
@@ -148,3 +165,25 @@ d3.select(window).on("resize", function(){
     height = +svg.node().getBoundingClientRect().height;
     simulation.alpha(1).restart();
 });
+
+///******************** UI ACTIONS *******************/
+//
+//function resetZoom() {
+//    d3.select('svg').transition().call(zoom.scaleTo, 1.0);
+//}
+//function zoomIn()    {
+//    d3.select('svg').transition().call(zoom.scaleBy, 2.0);
+//}
+//function zoomOut()   {
+//    d3.select('svg').transition().call(zoom.scaleBy, 0.5);
+//}
+//
+//function center()    {
+//    d3.select('svg').transition().call(zoom.translateTo, 0.5 * width, 0.5 * height);
+//}
+//function panLeft()   {
+//    d3.select('svg').transition().call(zoom.translateBy, -50, 0);
+//}
+//function panRight()  {
+//    d3.select('svg').transition().call(zoom.translateBy,  50, 0);
+//}
diff --git a/src/webui/service/templates/link/detail.html b/src/webui/service/templates/link/detail.html
new file mode 100644
index 0000000000000000000000000000000000000000..7df9ddce6bdddd511f3b50313cafa1374990b99e
--- /dev/null
+++ b/src/webui/service/templates/link/detail.html
@@ -0,0 +1,65 @@
+<!--
+    Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+   
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+   
+         http://www.apache.org/licenses/LICENSE-2.0
+   
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+   -->
+   {% extends 'base.html' %}
+   
+   {% block content %}
+    <h1>Link {{ link.link_id.link_uuid.uuid }}</h1>
+    <div class="row mb-3">
+          <div class="col-sm-3">
+               <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('link.home') }}'">
+                    <i class="bi bi-box-arrow-in-left"></i>
+                    Back to link list
+               </button>
+          </div>
+     </div>
+
+     <br>
+       <div class="row mb-3">
+            <div class="col-sm-4">
+                <b>UUID: </b>{{ link.link_id.link_uuid.uuid }}<br><br>
+            </div>
+            <div class="col-sm-8">
+                    <table class="table table-striped table-hover">
+                        <thead>
+                            <tr>
+                                <th scope="col">Endpoints</th>
+                                <th scope="col">Device</th>
+                            </tr>
+                        </thead>
+                        <tbody>
+                              {% for end_point in link.link_endpoint_ids %}
+                              <tr>
+                                   <td>
+                                        {{ end_point.endpoint_uuid.uuid }} 
+                                   </td>
+                                   <td>
+                                        <a href="{{ url_for('device.detail', device_uuid=end_point.device_id.device_uuid.uuid) }}">
+                                             {{ end_point.device_id.device_uuid.uuid }}
+                                             <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                                 <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                                 <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                                             </svg>
+                                        </a>
+                                   </td>
+                              </tr>
+                              {% endfor %}
+                        </tbody>
+                    </table>
+            </div>
+        </div>
+
+   {% endblock %}
+   
\ No newline at end of file
diff --git a/src/webui/service/templates/link/home.html b/src/webui/service/templates/link/home.html
index d0c122f6aafd0de8e2937be056d1c2e787c91710..77d00d34185ac45ada0ed6d8e9915c0b2f3ad9c0 100644
--- a/src/webui/service/templates/link/home.html
+++ b/src/webui/service/templates/link/home.html
@@ -1,96 +1,96 @@
 <!--
- Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-{% extends 'base.html' %}
-
-{% block content %}
-    <h1>Links</h1>
-
-    <div class="row">
-        <div class="col">
-            <!-- <a href="#" class="btn btn-primary" style="margin-bottom: 10px;">
-                <i class="bi bi-plus"></i>
-                Add New Link
-            </a> -->
-        </div>
-        <div class="col">
-            {{ links | length }} links found</i>
-        </div>
-        <!-- <div class="col">
-            <form>
-                <div class="input-group">
-                    <input type="text" aria-label="Search" placeholder="Search..." class="form-control"/>
-                    <button type="submit" class="btn btn-primary">Search</button>
-                  </div>
-            </form>
-        </div> -->
-    </div>
-
-    <table class="table table-striped table-hover">
-        <thead>
-          <tr>
-            <th scope="col">#</th>
-            <th scope="col">Endpoints</th>
-            <th scope="col"></th>
-          </tr>
-        </thead>
-        <tbody>
-            {% if links %}
-                {% for link in links %}
-                <tr>
-                    <td>
-                        <!-- <a href="#"> -->
-                            {{ link.link_id.link_uuid.uuid }}
-                        <!-- </a> -->
-                    </td>
-
-                    <td>
-                        <ul>
-                            {% for end_point in link.link_endpoint_ids %}
-                            <li>
-                                {{ end_point.endpoint_uuid.uuid }} / 
-                                Device: 
-                                <a href="{{ url_for('device.detail', device_uuid=end_point.device_id.device_uuid.uuid) }}">
-                                    {{ end_point.device_id.device_uuid.uuid }}
-                                    <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
-                                        <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
-                                        <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
-                                    </svg>
-                                </a>
-                            </li>
-                            {% endfor %}
-                        </ul>
-                    </td>
-
-                    <td>
-                        <!-- <a href="#">
-                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
-                                <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
-                                <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
-                            </svg>
-                        </a> -->
-                    </td>
-                </tr>
-                {% endfor %}
-            {% else %}
-                <tr>
-                    <td colspan="7">No links found</td>
-                </tr>
-            {% endif %}
-        </tbody>
-    </table>
-
-{% endblock %}
\ No newline at end of file
+    Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+   
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+   
+         http://www.apache.org/licenses/LICENSE-2.0
+   
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+   -->
+   
+   {% extends 'base.html' %}
+   
+   {% block content %}
+       <h1>Links</h1>
+   
+       <div class="row">
+           <div class="col">
+               <!-- <a href="#" class="btn btn-primary" style="margin-bottom: 10px;">
+                   <i class="bi bi-plus"></i>
+                   Add New Link
+               </a> -->
+           </div>
+           <div class="col">
+               {{ links | length }} links found</i>
+           </div>
+           <!-- <div class="col">
+               <form>
+                   <div class="input-group">
+                       <input type="text" aria-label="Search" placeholder="Search..." class="form-control"/>
+                       <button type="submit" class="btn btn-primary">Search</button>
+                     </div>
+               </form>
+           </div> -->
+       </div>
+   
+       <table class="table table-striped table-hover">
+           <thead>
+             <tr>
+               <th scope="col">#</th>
+               <th scope="col">Endpoints</th>
+               <th scope="col"></th>
+             </tr>
+           </thead>
+           <tbody>
+               {% if links %}
+                   {% for link in links %}
+                   <tr>
+                       <td>
+                           <!-- <a href="#"> -->
+                               {{ link.link_id.link_uuid.uuid }}
+                           <!-- </a> -->
+                       </td>
+   
+                       <td>
+                           <ul>
+                               {% for end_point in link.link_endpoint_ids %}
+                               <li>
+                                   {{ end_point.endpoint_uuid.uuid }} / 
+                                   Device: 
+                                   <a href="{{ url_for('device.detail', device_uuid=end_point.device_id.device_uuid.uuid) }}">
+                                       {{ end_point.device_id.device_uuid.uuid }}
+                                       <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                           <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                           <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                                       </svg>
+                                   </a>
+                               </li>
+                               {% endfor %}
+                           </ul>
+                       </td>
+   
+                       <td> 
+                            <a href="{{ url_for('link.detail', link_uuid=link.link_id.link_uuid.uuid) }}">
+                               <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                   <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                   <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                               </svg> 
+                           </a>
+                       </td>
+                   </tr>
+                   {% endfor %}
+               {% else %}
+                   <tr>
+                       <td colspan="7">No links found</td>
+                   </tr>
+               {% endif %}
+           </tbody>
+       </table>
+   
+   {% endblock %}
\ No newline at end of file
diff --git a/src/webui/service/templates/main/about.html b/src/webui/service/templates/main/about.html
index 4ba3a5845b0e8e70b029d4ec459733468899698b..80d61891ce95ff096308ed903da294bbf23c5070 100644
--- a/src/webui/service/templates/main/about.html
+++ b/src/webui/service/templates/main/about.html
@@ -16,10 +16,10 @@
 
 {% extends 'base.html' %}
 {% block content %}
-    <h1>TeraFlow OS</h1>
+    <h1>ETSI TeraFlowSDN Controller</h1>
 
-    <p>For more information, visit the <a href="https://teraflow-h2020.eu/" target="_newtf">TeraFlow H2020 webpage</a>.</p>
+    <p>For more information, visit the <a href="https://tfs.etsi.org/" target="_newtf">ETSI Open Source Group for TeraFlowSDN</a>.</p>
 
-    <img alt="Consortium" class="img-fluid" src="{{ url_for('static', filename='partners.png') }}"/>
+    <!--<img alt="Consortium" class="img-fluid" src="{{ url_for('static', filename='partners.png') }}"/>-->
 
 {% endblock %}
\ No newline at end of file
diff --git a/src/webui/service/templates/main/home.html b/src/webui/service/templates/main/home.html
index 3cc9fbcffce6cfbb6ebb40dec9d3359f59df5a15..db390939ff926b5bbfbfc6507b0f4e79695f3693 100644
--- a/src/webui/service/templates/main/home.html
+++ b/src/webui/service/templates/main/home.html
@@ -17,7 +17,7 @@
 {% extends 'base.html' %}
 
 {% block content %}
-    <h1>TeraFlow OS SDN Controller</h1>
+    <h2>ETSI TeraFlowSDN Controller</h2>
 
     {% for field, message in context_form.errors.items() %}
         <div class="alert alert-dismissible fade show" role="alert">
diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html
index 1e58b9eaad3155524808f60b49840edab7f17739..94581019e3be34511b43630759dc237780db0f41 100644
--- a/src/webui/service/templates/service/detail.html
+++ b/src/webui/service/templates/service/detail.html
@@ -17,85 +17,224 @@
 {% extends 'base.html' %}
 
 {% block content %}
-    <h1>Service {{ service.service_id.service_uuid.uuid }}</h1>
+<h1>Service {{ service.service_id.service_uuid.uuid }}</h1>
 
-    <div class="row mb-3">
-        <div class="col-sm-3">
-            <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('service.home') }}'">
-                <i class="bi bi-box-arrow-in-left"></i>
-                Back to service list
-            </button>
-        </div>
-        <div class="col-sm-3">
-            <a id="update" class="btn btn-secondary" href="#">
-                <i class="bi bi-pencil-square"></i>
-                Update
-            </a>
-        </div>
-        <div class="col-sm-3">
-            <!-- <button type="button" class="btn btn-danger"><i class="bi bi-x-square"></i>Delete service</button> -->
-            <button type="button" class="btn btn-danger" data-bs-toggle="modal" data-bs-target="#deleteModal">
-                <i class="bi bi-x-square"></i>Delete service
-              </button>
-        </div>
+<div class="row mb-3">
+    <div class="col-sm-3">
+        <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('service.home') }}'">
+            <i class="bi bi-box-arrow-in-left"></i>
+            Back to service list
+        </button>
     </div>
+    <!--
+    <div class="col-sm-3">
+        <a id="update" class="btn btn-secondary" href="#">
+            <i class="bi bi-pencil-square"></i>
+            Update
+        </a>
+    </div>
+    <div class="col-sm-3">-->
+        <!-- <button type="button" class="btn btn-danger"><i class="bi bi-x-square"></i>Delete service</button> -->
+        <!--<button type="button" class="btn btn-danger" data-bs-toggle="modal" data-bs-target="#deleteModal">
+            <i class="bi bi-x-square"></i>Delete service
+        </button>
+    </div>
+    -->
+</div>
 
-    <div class="row mb-3">
-        <div class="col-sm-1"><b>UUID:</b></div>
-        <div class="col-sm-5">
-            {{ service.service_id.service_uuid.uuid }}
-        </div>
-        <div class="col-sm-1"><b>Type:</b></div>
-        <div class="col-sm-5">
-            {{ service.service_type }}
-        </div>
+<div class="row mb-3">
+    <div class="col-sm-4">
+        <b>UUID: </b> {{ service.service_id.service_uuid.uuid }}<br><br>
+        <b>Type: </b> {{ ste.Name(service.service_type).replace('SERVICETYPE_', '') }}<br><br>
+        <b>Status: </b> {{ sse.Name(service.service_status.service_status).replace('SERVICESTATUS_', '') }}<br><br>
     </div>
-    <div class="row mb-3">
-        <b>Endpoints:</b>
-        <div class="col-sm-10">
-            <ul>
-            {% for endpoint in service.service_endpoint_ids %}
-                <li>{{ endpoint.endpoint_uuid.uuid }}: {{ endpoint.endpoint_type }}</li>
-            {% endfor %}
-            </ul>
-        </div>
+    <div class="col-sm-8">
+        <table class="table table-striped table-hover">
+            <thead>
+                <tr>
+                    <th scope="col">Endpoints</th>
+                    <th scope="col">Device</th>
+                </tr>
+            </thead>
+            <tbody>
+                {% for endpoint in service.service_endpoint_ids %}
+                <tr>
+                    <td>
+                        {{ endpoint.endpoint_uuid.uuid }}
+                    </td>
+                    <td>
+                        <a href="{{ url_for('device.detail', device_uuid=endpoint.device_id.device_uuid.uuid) }}">
+                            {{ endpoint.device_id.device_uuid.uuid }}
+                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor"
+                                class="bi bi-eye" viewBox="0 0 16 16">
+                                <path
+                                    d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z" />
+                                <path
+                                    d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z" />
+                            </svg>
+                        </a>
+                    </td>
+                </tr>
+                {% endfor %}
+            </tbody>
+        </table>
     </div>
-    <div class="row mb-3">
-        <b>Configurations:</b>
-        <div class="col-sm-10">
-            <ul>
-            {% for config in service.service_config.config_rules %}
-                {% if config.WhichOneof('config_rule') == 'custom' %}
-                <li>{{ config.custom.resource_key }}:
-                    <ul>
-                        {% for key, value in (config.custom.resource_value | from_json).items() %}
-                        <li><b>{{ key }}:</b> {{ value }}</li>
-                        {% endfor %}
-                    </ul>
-                </li>
+</div>
+<b>Constraints:</b>
+<table class="table table-striped table-hover">
+    <thead>
+        <tr>
+            <th scope="col">Kind</th>
+            <th scope="col">Type</th>
+            <th scope="col">Value</th>
+        </tr>
+    </thead>
+    <tbody>
+        {% for constraint in service.service_constraints %}
+        {% if constraint.WhichOneof('constraint')=='custom' %}
+        <tr>
+            <td>Custom</td>
+            <td>{{ constraint.custom.constraint_type }}</td>
+            <td>{{ constraint.custom.constraint_value }}</td>
+        </tr>
+        {% elif constraint.WhichOneof('constraint')=='endpoint_location' %}
+        <tr>
+            <td>Endpoint Location</td>
+            <td>
+                {{ constraint.endpoint_location.endpoint_id.device_id.device_uuid.uuid }} / {{
+                constraint.endpoint_location.endpoint_id.endpoint_uuid.uuid }}
+            </td>
+            <td>
+                {% if constraint.endpoint_location.location.WhichOneof('location')=='region' %}
+                    Region: {{ constraint.endpoint_location.location.region }}
+                {% elif constraint.endpoint_location.location.WhichOneof('location')=='gps_position' %}
+                    Position (lat/long):
+                    {{ constraint.endpoint_location.location.gps_position.latitude }} /
+                    {{ constraint.endpoint_location.location.gps_position.longitude }}
                 {% endif %}
-            {% endfor %}
-            </ul>
-        </div>
-    </div>
-
-    <!-- Modal -->
-<div class="modal fade" id="deleteModal" data-bs-backdrop="static" data-bs-keyboard="false" tabindex="-1" aria-labelledby="staticBackdropLabel" aria-hidden="true">
+            </td>
+        </tr>
+        {% elif constraint.WhichOneof('constraint')=='endpoint_priority' %}
+        <tr>
+            <td>Endpoint Priority</td>
+            <td>
+                {{ constraint.endpoint_priority.endpoint_id.device_id.device_uuid.uuid }} / {{
+                constraint.endpoint_priority.endpoint_id.endpoint_uuid.uuid }}
+            </td>
+            <td>{{ constraint.endpoint_priority.priority }}</td>
+        </tr>
+        {% elif constraint.WhichOneof('constraint')=='sla_availability' %}
+        <tr>
+            <td>SLA Availability</td>
+            <td>-</td>
+            <td>
+                {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
+                {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}active
+            </td>
+        </tr>
+        {% else %}
+        <tr>
+            <td>-</td>
+            <td>-</td>
+            <td>{{ constraint }}</td>
+        </tr>
+        {% endif %}
+        {% endfor %}
+    </tbody>
+</table>
+<b>Configurations:</b>
+<table class="table table-striped table-hover">
+    <thead>
+        <tr>
+            <th scope="col">Key</th>
+            <th scope="col">Value</th>
+        </tr>
+    </thead>
+    <tbody>
+        {% for config in service.service_config.config_rules %}
+        {% if config.WhichOneof('config_rule') == 'custom' %}
+        <tr>
+            <td>
+                {{ config.custom.resource_key }}
+            </td>
+            <td>
+                <ul>
+                    {% for key, value in (config.custom.resource_value | from_json).items() %}
+                    <li><b>{{ key }}:</b> {{ value }}</li>
+                    {% endfor %}
+                </ul>
+            </td>
+        </tr>
+        {% endif %}
+        {% endfor %}
+    </tbody>
+</table>
+<!-- Modal -->
+<div class="modal fade" id="deleteModal" data-bs-backdrop="static" data-bs-keyboard="false" tabindex="-1"
+    aria-labelledby="staticBackdropLabel" aria-hidden="true">
     <div class="modal-dialog">
-      <div class="modal-content">
-        <div class="modal-header">
-          <h5 class="modal-title" id="staticBackdropLabel">Delete service?</h5>
-          <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
-        </div>
-        <div class="modal-body">
-          Are you sure you want to delete the service "{{ service.service_id.service_uuid.uuid }}"?
-        </div>
-        <div class="modal-footer">
-          <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">No</button>
-          <a type="button" class="btn btn-danger" href="{{ url_for('service.delete', service_uuid=service.service_id.service_uuid.uuid) }}"><i class="bi bi-exclamation-diamond"></i>Yes</a>
+        <div class="modal-content">
+            <div class="modal-header">
+                <h5 class="modal-title" id="staticBackdropLabel">Delete service?</h5>
+                <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
+            </div>
+            <div class="modal-body">
+                Are you sure you want to delete the service "{{ service.service_id.service_uuid.uuid }}"?
+            </div>
+            <div class="modal-footer">
+                <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">No</button>
+                <a type="button" class="btn btn-danger"
+                    href="{{ url_for('service.delete', service_uuid=service.service_id.service_uuid.uuid) }}"><i
+                        class="bi bi-exclamation-diamond"></i>Yes</a>
+            </div>
         </div>
-      </div>
     </div>
-  </div>
+</div>
+
+
+<table class="table table-striped table-hover">
+    <thead>
+        <tr>
+            <th scope="col">Connection Id</th>
+            <th scope="col">Sub-service</th>
+            <th scope="col">Path</th>
+        </tr>
+    </thead>
+    <tbody>
+        {% for connection in connections.connections %}
+        <tr>
+            <td>
+                {{ connection.connection_id.connection_uuid.uuid }}
+            </td>
+            <td>
+                <ul>
+                {% for sub_service_id in connection.sub_service_ids %}
+                    <li>
+                        <a href="{{ url_for('service.detail', service_uuid=sub_service_id.service_uuid.uuid) }}">
+                            {{ sub_service_id.service_uuid.uuid }}
+                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                            </svg>
+                        </a>
+                    </li>
+                {% endfor %}
+                </ul>
+            </td>
+
+            {% for i in range(connection.path_hops_endpoint_ids|length) %}
+            <td>
+                {{ connection.path_hops_endpoint_ids[i].device_id.device_uuid.uuid }} / {{
+                connection.path_hops_endpoint_ids[i].endpoint_uuid.uuid }}
+            </td>
+            {% endfor %}
+        </tr>
+        {% endfor %}
+    </tbody>
+</table>
+
+
+
+
 
 {% endblock %}
\ No newline at end of file
diff --git a/src/webui/service/templates/service/home.html b/src/webui/service/templates/service/home.html
index 0e152006c149df35d477ecfb81bb4fcc0b562d9a..c0a01839bb519074526a4ed34669ebfdd3d8b8e4 100644
--- a/src/webui/service/templates/service/home.html
+++ b/src/webui/service/templates/service/home.html
@@ -46,7 +46,6 @@
             <th scope="col">#</th>
             <th scope="col">Type</th>
             <th scope="col">End points</th>
-            <th scope="col">Constraints</th>
             <th scope="col">Status</th>
             <th scope="col"></th>
           </tr>
@@ -70,14 +69,7 @@
                             {% endfor %}
                         </ul>
                     </td>
-                    <td>
-                        <ul>
-                            {% for constraint in service.service_constraints %}
-                            <li>{{ constraint.constraint_type }}: {{ constraint.constraint_value }}</li>
-                            {% endfor %}
-                        </ul>
-                    </td>
-                    <td>{{ sse.Name(service.service_status.service_status).replace('SERVICESTATUS_', '') }}</td>
+                    <td>{{ sse.Name(service.service_status.service_status).replace('SERVICESTATUS_', '') }} </td>
                     <td>
                         <a href="{{ url_for('service.detail', service_uuid=service.service_id.service_uuid.uuid) }}">
                             <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html
new file mode 100644
index 0000000000000000000000000000000000000000..936b0f08fb1b7def156e11f16bf552b8d60018be
--- /dev/null
+++ b/src/webui/service/templates/slice/detail.html
@@ -0,0 +1,221 @@
+<!--
+ Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+{% extends 'base.html' %}
+
+{% block content %}
+<h1>Slice {{ slice.slice_id.slice_uuid.uuid }} </h1>
+
+<div class="row mb-3">
+    <div class="col-sm-3">
+        <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('slice.home') }}'">
+            <i class="bi bi-box-arrow-in-left"></i>
+            Back to slice list
+        </button>
+    </div>
+    <!--
+    <div class="col-sm-3">
+        <a id="update" class="btn btn-secondary" href="#">
+            <i class="bi bi-pencil-square"></i>
+            Update
+        </a>
+    </div>
+    <div class="col-sm-3">-->
+        <!-- <button type="button" class="btn btn-danger"><i class="bi bi-x-square"></i>Delete slice</button> -->
+        <!--<button type="button" class="btn btn-danger" data-bs-toggle="modal" data-bs-target="#deleteModal">
+            <i class="bi bi-x-square"></i>Delete slice
+        </button>
+    </div>
+    -->
+</div>
+
+<div class="row mb-3">
+    <div class="col-sm-4">
+        <b>UUID: </b> {{ slice.slice_id.slice_uuid.uuid }}<br><br>
+        <b>Status: </b> {{ sse.Name(slice.slice_status.slice_status).replace('SLICESTATUS_', '') }}<br><br>
+    </div>
+    <div class="col-sm-8">
+        <table class="table table-striped table-hover">
+            <thead>
+                <tr>
+                    <th scope="col">Endpoints</th>
+                    <th scope="col">Device</th>
+                </tr>
+            </thead>
+            <tbody>
+                {% for endpoint in slice.slice_endpoint_ids %}
+                <tr>
+                    <td>
+                        {{ endpoint.endpoint_uuid.uuid }}
+                    </td>
+                    <td>
+                        <a href="{{ url_for('device.detail', device_uuid=endpoint.device_id.device_uuid.uuid) }}">
+                            {{ endpoint.device_id.device_uuid.uuid }}
+                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor"
+                                class="bi bi-eye" viewBox="0 0 16 16">
+                                <path
+                                    d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z" />
+                                <path
+                                    d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z" />
+                            </svg>
+                        </a>
+                    </td>
+                </tr>
+                {% endfor %}
+            </tbody>
+        </table>
+    </div>
+</div>
+<b>Constraints:</b>
+<table class="table table-striped table-hover">
+    <thead>
+        <tr>
+            <th scope="col">Kind</th>
+            <th scope="col">Type</th>
+            <th scope="col">Value</th>
+        </tr>
+    </thead>
+    <tbody>
+        {% for constraint in slice.slice_constraints %}
+        {% if constraint.WhichOneof('constraint')=='custom' %}
+        <tr>
+            <td>Custom</td>
+            <td>{{ constraint.custom.constraint_type }}</td>
+            <td>{{ constraint.custom.constraint_value }}</td>
+        </tr>
+        {% elif constraint.WhichOneof('constraint')=='endpoint_location' %}
+        <tr>
+            <td>Endpoint Location</td>
+            <td>
+                {{ constraint.endpoint_location.endpoint_id.device_id.device_uuid.uuid }} / {{
+                constraint.endpoint_location.endpoint_id.endpoint_uuid.uuid }}
+            </td>
+            <td>
+                {% if constraint.endpoint_location.location.WhichOneof('location')=='region' %}
+                    Region: {{ constraint.endpoint_location.location.region }}
+                {% elif constraint.endpoint_location.location.WhichOneof('location')=='gps_position' %}
+                    Position (lat/long):
+                    {{ constraint.endpoint_location.location.gps_position.latitude }} /
+                    {{ constraint.endpoint_location.location.gps_position.longitude }}
+                {% endif %}
+            </td>
+        </tr>
+        {% elif constraint.WhichOneof('constraint')=='endpoint_priority' %}
+        <tr>
+            <td>Endpoint Priority</td>
+            <td>
+                {{ constraint.endpoint_priority.endpoint_id.device_id.device_uuid.uuid }} / {{
+                constraint.endpoint_priority.endpoint_id.endpoint_uuid.uuid }}
+            </td>
+            <td>{{ constraint.endpoint_priority.priority }}</td>
+        </tr>
+        {% elif constraint.WhichOneof('constraint')=='sla_availability' %}
+        <tr>
+            <td>SLA Availability</td>
+            <td>-</td>
+            <td>
+                {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
+                {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}active
+            </td>
+        </tr>
+        {% else %}
+        <tr>
+            <td>-</td>
+            <td>-</td>
+            <td>{{ constraint }}</td>
+        </tr>
+        {% endif %}
+        {% endfor %}
+    </tbody>
+</table>
+<b>Configurations:</b>
+<table class="table table-striped table-hover">
+    <thead>
+        <tr>
+            <th scope="col">Key</th>
+            <th scope="col">Value</th>
+        </tr>
+    </thead>
+    <tbody>
+        {% for config in slice.slice_config.config_rules %}
+        {% if config.WhichOneof('config_rule') == 'custom' %}
+        <tr>
+            <td>
+                {{ config.custom.resource_key }}
+            </td>
+            <td>
+                <ul>
+                    {% for key, value in (config.custom.resource_value | from_json).items() %}
+                    <li><b>{{ key }}:</b> {{ value }}</li>
+                    {% endfor %}
+                </ul>
+            </td>
+        </tr>
+        {% endif %}
+        {% endfor %}
+    </tbody>
+</table>
+<div class="row mb-2">
+    <div class="col-sm-6">
+        <table class="table table-striped table-hover">
+            <thead>
+                <tr>
+                    <th scope="col">Service Id</th>
+                </tr>
+            </thead>
+            <tbody>
+                {% for service_id in slice.slice_service_ids %}
+                <tr>
+                    <td>
+                        <a href="{{ url_for('service.detail', service_uuid=service_id.service_uuid.uuid) }}">
+                            {{ service_id.service_uuid.uuid }}
+                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                            </svg>
+                        </a>
+                    </td>
+                </tr>
+                {% endfor %}
+            </tbody>
+        </table>
+    </div>
+    <div class="col-sm-6">
+        <table class="table table-striped table-hover">
+            <thead>
+                <tr>
+                    <th scope="col">Sub-slices</th>
+                </tr>
+            </thead>
+            <tbody>
+                {% for subslice_id in slice.slice_subslice_ids %}
+                <tr>
+                    <td>
+                        <a href="{{ url_for('slice.detail', slice_uuid=subslice_id.slice_uuid.uuid) }}">
+                            {{ subslice_id.slice_uuid.uuid }}
+                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                            </svg>
+                        </a>
+                    </td>
+                </tr>
+                {% endfor %}
+            </tbody>
+        </table>
+    </div>
+</div>
+{% endblock %}
\ No newline at end of file
diff --git a/src/webui/service/templates/slice/home.html b/src/webui/service/templates/slice/home.html
new file mode 100644
index 0000000000000000000000000000000000000000..46a2b4f1a5b4aceb5e432b7b69563d20258fc152
--- /dev/null
+++ b/src/webui/service/templates/slice/home.html
@@ -0,0 +1,77 @@
+<!--
+ Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+{% extends 'base.html' %}
+
+{% block content %}
+    <h1>Slice</h1>
+
+    <div class="row">
+
+        <div class="col">
+            {{ slices | length }} slices found in context <i>{{ session['context_uuid'] }}</i>
+        </div>
+
+    </div>
+
+    
+    <table class="table table-striped table-hover">
+        <thead>
+          <tr>
+            <th scope="col">#</th>
+            <th scope="col">End points</th>
+            <th scope="col">Status</th>
+            <th scope="col"></th>
+            
+          </tr>
+        </thead>
+        <tbody>
+            {% if slices %}
+                {% for slice in slices %}
+                <tr>
+                    <td>
+                        {{ slice.slice_id.slice_uuid.uuid }}
+                    </td>
+                    <td>
+                        <ul>
+                        {% for i in range(slice.slice_endpoint_ids|length) %}
+                            <li> {{ slice.slice_endpoint_ids[i].device_id.device_uuid.uuid }} / {{ slice.slice_endpoint_ids[i].endpoint_uuid.uuid }} </li>
+                        {% endfor %}
+                        </ul>
+                    </td>
+                    <td>
+                        {{ sse.Name(slice.slice_status.slice_status).replace('SLICESTATUS_', '') }}
+                    </td>
+                    <td>
+                        <a href="{{ url_for('slice.detail', slice_uuid=slice.slice_id.slice_uuid.uuid) }}">
+                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                            </svg>
+                        </a>
+                    </td>
+                </tr>
+                {% endfor %}
+                {% else %}
+                <tr>
+                    <td colspan="7">No slices found</td>
+                </tr> 
+            
+            {% endif %}
+        </tbody>
+    </table>
+
+{% endblock %}
\ No newline at end of file
diff --git a/src/webui/tests/test_unitary.py b/src/webui/tests/test_unitary.py
index 945a60186e04cc1bd3ee7678b340e9321646df97..11cc77a460a94707c6226cdfc4ca747563e95f45 100644
--- a/src/webui/tests/test_unitary.py
+++ b/src/webui/tests/test_unitary.py
@@ -68,6 +68,7 @@ class TestWebUI(ClientTestCase):
         with self.app.app_context():
             url_for('main.home')
             url_for('service.home')
+            url_for('slice.home')
             url_for('device.home')
             url_for('link.home')
             #url_for('main.debug')
diff --git a/tutorial/1-3-deploy-tfs.md b/tutorial/1-3-deploy-tfs.md
index 07c79d7ab34f12b9042a38489752b28bd4fd474e..9b2da4fc1734fc08d0bb24621aadd067d7a29b97 100644
--- a/tutorial/1-3-deploy-tfs.md
+++ b/tutorial/1-3-deploy-tfs.md
@@ -58,7 +58,7 @@ password to be set for the Grafana `admin` user.
 cd ~/tfs-ctrl
 tee my_deploy.sh >/dev/null <<EOF
 export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
-export TFS_COMPONENTS="context device automation service compute monitoring webui"
+export TFS_COMPONENTS="context device automation pathcomp service slice compute monitoring webui"
 export TFS_IMAGE_TAG="dev"
 export TFS_K8S_NAMESPACE="tfs"
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
diff --git a/tutorial/2-0-run-experiments.md b/tutorial/2-0-run-experiments.md
index f87d00e98a66449f5fa6d267c527565b145722b2..82f6a56bf0481a4edeaf71251510f74c51138096 100644
--- a/tutorial/2-0-run-experiments.md
+++ b/tutorial/2-0-run-experiments.md
@@ -8,5 +8,5 @@ commands you might need, configuring the network topology, and executing differe
 - [2.1. Configure the Python environment](./2-1-python-environment.md)
 - [2.2. OFC'22 Demo - Bootstrap devices, Monitor device Endpoints, Manage L3VPN Services](./2-2-ofc22.md)
 - [2.3. OECC/PSC'22 Demo (WORK IN PROGRESS)](./2-3-oeccpsc22.md)
-- [2.4. ECOC'22 Demo (PENDING)](./2-4-ecoc22.md)
+- [2.4. ECOC'22 Demo - Disjoint DC-2-DC L3VPN Service (WORK IN PROGRESS)](./2-4-ecoc22.md)
 - [2.5. NFV-SDN'22 Demo (PENDING)](./2-5-nfvsdn22.md)
diff --git a/tutorial/2-4-ecoc22.md b/tutorial/2-4-ecoc22.md
index f752bda840a3eb2fbde6c907e4ce139de3f8ce82..6fc9333b58fe7c6da51be5eefe9167853508456a 100644
--- a/tutorial/2-4-ecoc22.md
+++ b/tutorial/2-4-ecoc22.md
@@ -1 +1,120 @@
-# 2.4. ECOC'22 Demo (PENDING)
+# 2.4. ECOC'22 Demo - Disjoint DC-2-DC L3VPN Service (WORK IN PROGRESS)
+
+This functional test reproduces the experimental assessment of "Experimental Demonstration of Transport Network Slicing
+with SLA Using the TeraFlowSDN Controller" presented at [ECOC'22](https://www.ecoc2022.org/).
+
+## 2.4.1. Functional test folder
+
+This functional test can be found in folder `./src/tests/ecoc22/`. A convenience alias `./ecoc22/` pointing to that
+folder has been defined.
+
+## 2.4.2. Execute with real devices
+
+This functional test has only been tested with emulated devices; however, if you have access to real devices, you can
+modify the files `./ecoc22/tests/Objects.py` and `./ecoc22/tests/Credentials.py` to point to your devices, and map to
+your network topology.
+Otherwise, you can modify the `./ecoc22/tests/descriptors_emulated.json` that is designed to be uploaded through the
+WebUI instead of using the command line scripts.
+
+__Important__: The device drivers operating with real devices, e.g., OpenConfigDriver, P4Driver, and TransportApiDriver,
+               have to be considered as experimental. The configuration and monitoring capabilities they support are
+               limited or partially implemented/tested. Use them with care.
+
+
+## 2.4.3. Deployment and Dependencies
+
+To run this functional test, it is assumed you have deployed a MicroK8s-based Kubernetes environment and a TeraFlowSDN
+controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the Python
+environment as described in
+[Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md).
+Remember to source the scenario settings appropriately, e.g., `cd ~/tfs-ctrl && source my_deploy.sh` in each terminal
+you open.
+Next, remember to source the environment variables created by the deployment, e.g.,
+`cd ~/tfs-ctrl && source tfs_runtime_env_vars.sh`.
+Then, re-build the protocol buffers code from the proto files:
+`./proto/generate_code_python.sh`
+
+
+
+## 2.4.4. Access to the WebUI and Dashboard
+
+When the deployment completes, you can connect to the TeraFlowSDN WebUI and Dashboards as described in
+[Tutorial: Deployment Guide > 1.4. Access TeraFlowSDN WebUI and Grafana Dashboards](./1-4-access-webui.md)
+
+Notes:
+- the default credentials for the Grafana Dashboiard is user/pass: `admin`/`admin123+`.
+- this functional test does not involve the Monitoring component, so no monitoring data is plotted in Grafana.
+
+
+## 2.4.5. Test execution
+
+To execute this functional test, four main steps needs to be carried out:
+1. Device bootstrapping
+2. L3VPN Service creation
+3. L3VPN Service removal
+4. Cleanup
+
+Upon the execution of each test progresses, a report will be generated indicating PASSED / FAILED / SKIPPED. If there
+is some error during the execution, you should see a detailed report on the error. See the troubleshooting section if
+needed.
+
+You can check the logs of the different components using the appropriate `scripts/show_logs_[component].sh` scripts
+after you execute each step.
+
+
+### 2.4.5.1. Device bootstrapping
+
+This step configures some basic entities (Context and Topology), the devices, and the links in the topology. The
+expected results are:
+- The devices to be added into the Topology.
+- The devices to be pre-configured and initialized as ENABLED by the Automation component.
+- The monitoring for the device ports (named as endpoints in TeraFlowSDN) to be activated and data collection to
+  automatically start.
+- The links to be added to the topology.
+
+To run this step, you can do it from the WebUI by uploading the file `./ecoc22/tests/descriptors_emulated.json` that
+contains the descriptors of the contexts, topologies, devices, and links, or by executing the
+`./ecoc22/run_test_01_bootstrap.sh` script.
+
+When the bootstrapping finishes, check in the Grafana L3-Monitoring Dashboard and you should see the monitoring data
+being plotted and updated every 5 seconds (by default). Given that there is no service configured, you should see a
+0-valued flat plot.
+
+In the WebUI, select the "admin" Context. Then, in the "Devices" tab you should see that 5 different emulated devices
+have been created and activated: 4 packet routers, and 1 optical line system controller. Besides, in the "Services" tab
+you should see that there is no service created. Note here that the emulated devices produce synthetic
+randomly-generated data and do not care about the services configured.
+
+
+### 2.4.5.2. L3VPN Service creation
+
+This step configures a new service emulating the request an OSM WIM would make by means of a Mock OSM instance.
+
+To run this step, execute the `./ecoc22/run_test_02_create_service.sh` script.
+
+When the script finishes, check the WebUI "Services" tab. You should see that two services have been created, one for
+the optical layer and another for the packet layer. Besides, you can check the "Devices" tab to see the configuration
+rules that have been configured in each device. In the Grafana Dashboard, given that there is now a service configured,
+you should see the plots with the monitored data for the device. By default, device R1-EMU is selected.
+
+
+### 2.4.5.3. L3VPN Service removal
+
+This step deconfigures the previously created services emulating the request an OSM WIM would make by means of a Mock
+OSM instance.
+
+To run this step, execute the `./ecoc22/run_test_03_delete_service.sh` script, or delete the L3NM service from the WebUI.
+
+When the script finishes, check the WebUI "Services" tab. You should see that the two services have been removed.
+Besides, in the "Devices" tab you can see that the appropriate configuration rules have been deconfigured. In the
+Grafana Dashboard, given that there is no service configured, you should see a 0-valued flat plot again.
+
+
+### 2.4.5.4. Cleanup
+
+This last step performs a cleanup of the scenario removing all the TeraFlowSDN entities for completeness.
+
+To run this step, execute the `./ecoc22/run_test_04_cleanup.sh` script.
+
+When the script finishes, check the WebUI "Devices" tab, you should see that the devices have been removed. Besides, in
+the "Services" tab you can see that the "admin" Context has no services given that that context has been removed.
diff --git a/tutorial/3-2-develop-cth.md b/tutorial/3-2-develop-cth.md
index eda70c9e8c411c8cc6a0ed0832f573ca787962ca..1b2a4690a3177628e18a4ca6f77365f515d6dcc5 100644
--- a/tutorial/3-2-develop-cth.md
+++ b/tutorial/3-2-develop-cth.md
@@ -1,5 +1,18 @@
 # 3.2. Development Commands, Tricks, and Hints (WORK IN PROGRESS)
 
+## Building, running, testing and reporting code coverage locally
+
+The project runs a CI/CD loops that ensures that all tests are run whenever new code is committed to our reporitory.
+However, committing and waiting for the pipeline to run can take substantial time.
+For this reason, we prepared a script that runs in your local machine, builds the container image and executes the tests within the image.
+
+To use the script receives one argument that is the name of the component you want to run.
+For instance, if you want to build and run the tests of the `compute` component, you can run:
+
+```shell
+scripts/build_run_report_tests_locally.sh compute
+```
+
 
 
 ## Items to be addressed: